sprd-dma.c

/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET		0x1000
#define SPRD_DMA_CHN_REG_LENGTH		0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE	64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE		0x0
#define SPRD_DMA_GLB_FRAG_WAIT		0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN	0xc
#define SPRD_DMA_GLB_INT_RAW_STS	0x10
#define SPRD_DMA_GLB_INT_MSK_STS	0x14
#define SPRD_DMA_GLB_REQ_STS		0x18
#define SPRD_DMA_GLB_CHN_EN_STS		0x1c
#define SPRD_DMA_GLB_DEBUG_STS		0x20
#define SPRD_DMA_GLB_ARB_SEL_STS	0x24
#define SPRD_DMA_GLB_2STAGE_GRP1	0x28
#define SPRD_DMA_GLB_2STAGE_GRP2	0x2c
#define SPRD_DMA_GLB_REQ_UID(uid)	(0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET	0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE		0x0
#define SPRD_DMA_CHN_REQ		0x4
#define SPRD_DMA_CHN_CFG		0x8
#define SPRD_DMA_CHN_INTC		0xc
#define SPRD_DMA_CHN_SRC_ADDR		0x10
#define SPRD_DMA_CHN_DES_ADDR		0x14
#define SPRD_DMA_CHN_FRG_LEN		0x18
#define SPRD_DMA_CHN_BLK_LEN		0x1c
#define SPRD_DMA_CHN_TRSC_LEN		0x20
#define SPRD_DMA_CHN_TRSF_STEP		0x24
#define SPRD_DMA_CHN_WARP_PTR		0x28
#define SPRD_DMA_CHN_WARP_TO		0x2c
#define SPRD_DMA_CHN_LLIST_PTR		0x30
#define SPRD_DMA_CHN_FRAG_STEP		0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP	0x38
#define SPRD_DMA_CHN_DES_BLK_STEP	0x3c

/* SPRD_DMA_GLB_2STAGE_GRP register definition */
#define SPRD_DMA_GLB_2STAGE_EN		BIT(24)
#define SPRD_DMA_GLB_CHN_INT_MASK	GENMASK(23, 20)
#define SPRD_DMA_GLB_DEST_INT		BIT(22)
#define SPRD_DMA_GLB_SRC_INT		BIT(20)
#define SPRD_DMA_GLB_LIST_DONE_TRG	BIT(19)
#define SPRD_DMA_GLB_TRANS_DONE_TRG	BIT(18)
#define SPRD_DMA_GLB_BLOCK_DONE_TRG	BIT(17)
#define SPRD_DMA_GLB_FRAG_DONE_TRG	BIT(16)
#define SPRD_DMA_GLB_TRG_OFFSET		16
#define SPRD_DMA_GLB_DEST_CHN_MASK	GENMASK(13, 8)
#define SPRD_DMA_GLB_DEST_CHN_OFFSET	8
#define SPRD_DMA_GLB_SRC_CHN_MASK	GENMASK(5, 0)

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK		GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET		24
#define SPRD_DMA_FRAG_INT_EN		BIT(0)
#define SPRD_DMA_BLK_INT_EN		BIT(1)
#define SPRD_DMA_TRANS_INT_EN		BIT(2)
#define SPRD_DMA_LIST_INT_EN		BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN		BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN			BIT(0)
#define SPRD_DMA_LINKLIST_EN		BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET	24
#define SPRD_DMA_DONOT_WAIT_BDONE	1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN			BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN		BIT(0)
#define SPRD_DMA_PAUSE_STS		BIT(2)
#define SPRD_DMA_PAUSE_CNT		0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK		GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK		GENMASK(31, 0)
#define SPRD_DMA_WRAP_ADDR_MASK		GENMASK(27, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET	4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS		BIT(16)
#define SPRD_DMA_BLK_INT_STS		BIT(17)
#define SPRD_DMA_TRSC_INT_STS		BIT(18)
#define SPRD_DMA_LIST_INT_STS		BIT(19)
#define SPRD_DMA_CFGERR_INT_STS		BIT(20)
#define SPRD_DMA_CHN_INT_STS					\
	(SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS |		\
	 SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS |	\
	 SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET	30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
#define SPRD_DMA_SWT_MODE_OFFSET	26
#define SPRD_DMA_REQ_MODE_OFFSET	24
#define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
#define SPRD_DMA_WRAP_SEL_DEST		BIT(23)
#define SPRD_DMA_WRAP_EN		BIT(22)
#define SPRD_DMA_FIX_SEL_OFFSET		21
#define SPRD_DMA_FIX_EN_OFFSET		20
#define SPRD_DMA_LLIST_END		BIT(19)
#define SPRD_DMA_FRG_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK		GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET	16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
#define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)

/* SPRD DMA_SRC_BLK_STEP register definition */
#define SPRD_DMA_LLIST_HIGH_MASK	GENMASK(31, 28)
#define SPRD_DMA_LLIST_HIGH_SHIFT	28

/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK		GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK		GENMASK(7, 0)
#define SPRD_DMA_INT_TYPE_MASK		GENMASK(7, 0)

/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP		0
#define SPRD_DMA_BYTE_STEP		1
#define SPRD_DMA_SHORT_STEP		2
#define SPRD_DMA_WORD_STEP		4
#define SPRD_DMA_DWORD_STEP		8

#define SPRD_DMA_SOFTWARE_UID		0

/* dma data width values */
enum sprd_dma_datawidth {
	SPRD_DMA_DATAWIDTH_1_BYTE,
	SPRD_DMA_DATAWIDTH_2_BYTES,
	SPRD_DMA_DATAWIDTH_4_BYTES,
	SPRD_DMA_DATAWIDTH_8_BYTES,
};

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
	struct virt_dma_desc vd;
	struct sprd_dma_chn_hw chn_hw;
	enum dma_transfer_direction dir;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan vc;
	void __iomem *chn_base;
	struct sprd_dma_linklist linklist;
	struct dma_slave_config slave_cfg;
	u32 chn_num;
	u32 dev_id;
	enum sprd_dma_chn_mode chn_mode;
	enum sprd_dma_trg_mode trg_mode;
	enum sprd_dma_int_type int_type;
	struct sprd_dma_desc *cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device dma_dev;
	void __iomem *glb_base;
	struct clk *clk;
	struct clk *ashb_clk;
	int irq;
	u32 total_chns;
	struct sprd_dma_chn channels[];
};

static void sprd_dma_free_desc(struct virt_dma_desc *vd);
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sprd_dma_desc, vd);
}
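
/*
 * Read-modify-write helpers: update only the bits selected by @mask in a
 * global or per-channel register and leave the remaining bits untouched.
 */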
static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(sdev->glb_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, sdev->glb_base + reg);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
}

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
	int ret;

	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only used by the AGCP DMA controller,
	 * so check whether it needs to be enabled.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		ret = clk_prepare_enable(sdev->ashb_clk);

	return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/* Also disable the optional ashb_clk for AGCP DMA, if present. */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}
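
/*
 * Bind or unbind a hardware request UID to this channel. The UID registers
 * start at SPRD_DMA_GLB_REQ_UID_OFFSET and are indexed from UID 1; writing
 * the channel number + 1 selects the channel, writing 0 unbinds it.
 */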
static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}
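
/*
 * Pause or resume one channel. When pausing, poll SPRD_DMA_PAUSE_STS for up
 * to SPRD_DMA_PAUSE_CNT iterations so the channel has actually quiesced
 * before the caller disables it.
 */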
static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}
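
/*
 * The controller splits 36-bit addresses: the low 32 bits live in the
 * SRC/DES address registers and the high 4 bits in the wrap registers, so
 * the current address has to be reassembled from both.
 */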
static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;

	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;

	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;

	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;

	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;

	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

	return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}
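
/*
 * Program the global group registers for a 2-stage transfer, where one
 * channel acts as the source stage and another as the destination stage.
 * The trigger mode selects which source-channel event (fragment, block,
 * transaction or list done) starts the destination channel.
 */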
static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 val, chn = schan->chn_num + 1;

	switch (schan->chn_mode) {
	case SPRD_DMA_SRC_CHN0:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_SRC_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_SRC_CHN1:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_SRC_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	case SPRD_DMA_DST_CHN0:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
			SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_DEST_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_DST_CHN1:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
			SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_DEST_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	default:
		dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n",
			schan->chn_mode);
		return -EINVAL;
	}

	return 0;
}

static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 reg, val, req_id;

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
		return;

	/* The DMA request id always starts from 0. */
	req_id = schan->dev_id - 1;

	if (req_id < 32) {
		reg = SPRD_DMA_GLB_REQ_PEND0_EN;
		val = BIT(req_id);
	} else {
		reg = SPRD_DMA_GLB_REQ_PEND1_EN;
		val = BIT(req_id - 32);
	}

	sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
}

static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}
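
/*
 * Load the next queued descriptor's register snapshot into the channel and
 * kick the transfer. Software-triggered channels (UID 0) also need an
 * explicit request, unless the channel is the destination stage of a
 * 2-stage transfer and is started by the source stage instead.
 */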
static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Set the 2-stage configuration if the channel starts a 2-stage
	 * transfer.
	 */
	if (schan->chn_mode && sprd_dma_set_2stage_config(schan))
		return;

	/*
	 * Copy the DMA configuration from the DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_set_pending(schan, true);
	sprd_dma_enable_chn(schan);

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
	    schan->chn_mode != SPRD_DMA_DST_CHN0 &&
	    schan->chn_mode != SPRD_DMA_DST_CHN1)
		sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_set_pending(schan, false);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
	schan->cur_desc = NULL;
}
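
/*
 * Completion test: the interrupt-type and request-mode enums are ordered
 * (fragment < block < transaction < list), so a request is considered done
 * once the interrupt that fired ranks at least as high as the configured
 * request mode.
 */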
static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
				      enum sprd_dma_int_type int_type,
				      enum sprd_dma_req_mode req_mode)
{
	if (int_type == SPRD_DMA_NO_INT)
		return false;

	return int_type >= req_mode + 1;
}
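
/*
 * One interrupt line covers all channels: walk the masked interrupt status
 * word, clear each signalled channel's interrupt, then either run the
 * cyclic callback (link-list mode) or complete the descriptor and start
 * the next one.
 */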
static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
	struct sprd_dma_chn *schan;
	struct sprd_dma_desc *sdesc;
	enum sprd_dma_req_mode req_type;
	enum sprd_dma_int_type int_type;
	bool trans_done = false, cyclic = false;
	u32 i;

	while (irq_status) {
		i = __ffs(irq_status);
		irq_status &= (irq_status - 1);
		schan = &sdev->channels[i];

		spin_lock(&schan->vc.lock);

		sdesc = schan->cur_desc;
		if (!sdesc) {
			spin_unlock(&schan->vc.lock);
			return IRQ_HANDLED;
		}

		int_type = sprd_dma_get_int_type(schan);
		req_type = sprd_dma_get_req_type(schan);
		sprd_dma_clear_int(schan);

		/* cyclic mode schedule callback */
		cyclic = !!schan->linklist.phy_addr;
		if (cyclic) {
			vchan_cyclic_callback(&sdesc->vd);
		} else {
			/* Check if the dma request descriptor is done. */
			trans_done = sprd_dma_check_trans_done(sdesc, int_type,
							       req_type);
			if (trans_done) {
				vchan_cookie_complete(&sdesc->vd);
				schan->cur_desc = NULL;
				sprd_dma_start(schan);
			}
		}
		spin_unlock(&schan->vc.lock);
	}

	return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return pm_runtime_get_sync(chan->device->dev);
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *cur_vd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (schan->cur_desc)
		cur_vd = &schan->cur_desc->vd;

	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	if (cur_vd)
		sprd_dma_free_desc(cur_vd);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}
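
/*
 * Residue reporting: a descriptor still sitting in the issued list reports
 * its whole programmed length (transaction, block or fragment) as residue,
 * while for the in-flight descriptor the residue is taken from the
 * channel's current source or destination address.
 */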
static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		struct sprd_dma_desc *sdesc = schan->cur_desc;

		if (sdesc->dir == DMA_DEV_TO_MEM)
			pos = sprd_dma_get_dst_addr(schan);
		else
			pos = sprd_dma_get_src_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}
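
/*
 * Translate a dma_slave_buswidth value (1/2/4/8 bytes) into the
 * controller's 2-bit datawidth encoding (log2 of the byte count) and into
 * the address step in bytes.
 */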
static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(buswidth) - 1;

	default:
		return -EINVAL;
	}
}

static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return buswidth;

	default:
		return -EINVAL;
	}
}
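
/*
 * Build the hardware register snapshot for one transfer (or one link-list
 * node). For slave transfers only one side steps through memory while the
 * device side keeps a fixed address ("fix mode", apart from the 2-stage
 * destination case); in link-list mode each node's llist_ptr chains to the
 * next node's configuration, wrapping back to the first node so the list
 * can run cyclically.
 */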
static int sprd_dma_fill_desc(struct dma_chan *chan,
			      struct sprd_dma_chn_hw *hw,
			      unsigned int sglen, int sg_index,
			      dma_addr_t src, dma_addr_t dst, u32 len,
			      enum dma_transfer_direction dir,
			      unsigned long flags,
			      struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	enum sprd_dma_chn_mode chn_mode = schan->chn_mode;
	u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
	u32 int_mode = flags & SPRD_DMA_INT_MASK;
	int src_datawidth, dst_datawidth, src_step, dst_step;
	u32 temp, fix_mode = 0, fix_en = 0;
	phys_addr_t llist_ptr;

	if (dir == DMA_MEM_TO_DEV) {
		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
		if (src_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid source step\n");
			return src_step;
		}

		/*
		 * For a 2-stage transfer, the destination channel step cannot
		 * be 0, since the destination device is the AON IRAM.
		 */
		if (chn_mode == SPRD_DMA_DST_CHN0 ||
		    chn_mode == SPRD_DMA_DST_CHN1)
			dst_step = src_step;
		else
			dst_step = SPRD_DMA_NONE_STEP;
	} else {
		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
		if (dst_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
			return dst_step;
		}
		src_step = SPRD_DMA_NONE_STEP;
	}

	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
	if (src_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
		return src_datawidth;
	}

	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
	if (dst_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
		return dst_datawidth;
	}

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;

	/*
	 * wrap_ptr and wrap_to hold the high 4 bits of the source and
	 * destination addresses.
	 */
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;

	/*
	 * If the src step and dst step are both 0, or both non-zero, fix mode
	 * cannot be enabled. If one is 0 and the other is not, fix mode can
	 * be enabled for the non-stepping side.
	 */
	if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;
		else
			fix_mode = 0;
	}

	hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;

	temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
	temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
	temp |= schan->linklist.wrap_addr ?
		SPRD_DMA_WRAP_EN | SPRD_DMA_WRAP_SEL_DEST : 0;
	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	/* link-list configuration */
	if (schan->linklist.phy_addr) {
		hw->cfg |= SPRD_DMA_LINKLIST_EN;

		/* link-list index */
		temp = sglen ? (sg_index + 1) % sglen : 0;

		/* Next link-list configuration's physical address offset */
		temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;

		/*
		 * Set the link-list pointer to point to the next link-list
		 * configuration's physical address.
		 */
		llist_ptr = schan->linklist.phy_addr + temp;
		hw->llist_ptr = lower_32_bits(llist_ptr);
		hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
			SPRD_DMA_LLIST_HIGH_MASK;

		if (schan->linklist.wrap_addr) {
			hw->wrap_ptr |= schan->linklist.wrap_addr &
				SPRD_DMA_WRAP_ADDR_MASK;
			hw->wrap_to |= dst & SPRD_DMA_WRAP_ADDR_MASK;
		}
	} else {
		hw->llist_ptr = 0;
		hw->src_blk_step = 0;
	}

	hw->frg_step = 0;
	hw->des_blk_step = 0;
	return 0;
}

static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
				       unsigned int sglen, int sg_index,
				       dma_addr_t src, dma_addr_t dst, u32 len,
				       enum dma_transfer_direction dir,
				       unsigned long flags,
				       struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_chn_hw *hw;

	if (!schan->linklist.virt_addr)
		return -EINVAL;

	hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
					sg_index * sizeof(*hw));

	return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
				  dir, flags, slave_cfg);
}
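
/*
 * Memcpy preparation: pick the widest datawidth and address step the
 * length is aligned to, and program a single transaction-mode transfer
 * with both the source and destination addresses stepping.
 */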
static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *sdesc;
	struct sprd_dma_chn_hw *hw;
	enum sprd_dma_datawidth datawidth;
	u32 step, temp;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	hw = &sdesc->chn_hw;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		SPRD_DMA_HIGH_ADDR_MASK;

	if (IS_ALIGNED(len, 8)) {
		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
		step = SPRD_DMA_DWORD_STEP;
	} else if (IS_ALIGNED(len, 4)) {
		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
		step = SPRD_DMA_WORD_STEP;
	} else if (IS_ALIGNED(len, 2)) {
		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
		step = SPRD_DMA_SHORT_STEP;
	} else {
		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
		step = SPRD_DMA_BYTE_STEP;
	}

	temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= len & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}
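
/*
 * Slave transfer preparation: an optional struct sprd_dma_linklist passed
 * via @context switches the channel into link-list mode, where each
 * scatterlist entry is filled into one hardware-chained configuration
 * node; the channel registers themselves are programmed from the first
 * entry.
 */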
static struct dma_async_tx_descriptor *
sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
	dma_addr_t src = 0, dst = 0;
	dma_addr_t start_src = 0, start_dst = 0;
	struct sprd_dma_desc *sdesc;
	struct scatterlist *sg;
	u32 len = 0;
	int ret, i;

	if (!is_slave_direction(dir))
		return NULL;

	if (context) {
		struct sprd_dma_linklist *ll_cfg =
			(struct sprd_dma_linklist *)context;

		schan->linklist.phy_addr = ll_cfg->phy_addr;
		schan->linklist.virt_addr = ll_cfg->virt_addr;
		schan->linklist.wrap_addr = ll_cfg->wrap_addr;
	} else {
		schan->linklist.phy_addr = 0;
		schan->linklist.virt_addr = 0;
		schan->linklist.wrap_addr = 0;
	}

	/*
	 * Set the channel mode, interrupt mode and trigger mode for a 2-stage
	 * transfer.
	 */
	schan->chn_mode =
		(flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
	schan->trg_mode =
		(flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
	schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	sdesc->dir = dir;

	for_each_sg(sgl, sg, sglen, i) {
		len = sg_dma_len(sg);

		if (dir == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = slave_cfg->dst_addr;
		} else {
			src = slave_cfg->src_addr;
			dst = sg_dma_address(sg);
		}

		if (!i) {
			start_src = src;
			start_dst = dst;
		}

		/*
		 * Link-list mode needs at least two link-list configurations.
		 * With only one sg entry there is no link-list configuration
		 * to fill.
		 */
		if (sglen < 2)
			break;

		ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
						  dir, flags, slave_cfg);
		if (ret) {
			kfree(sdesc);
			return NULL;
		}
	}

	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
				 start_dst, len, dir, flags, slave_cfg);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

static int sprd_dma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *config)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;

	memcpy(slave_cfg, config, sizeof(*config));
	return 0;
}

static int sprd_dma_pause(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *cur_vd = NULL;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (schan->cur_desc)
		cur_vd = &schan->cur_desc->vd;

	sprd_dma_stop(schan);

	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	if (cur_vd)
		sprd_dma_free_desc(cur_vd);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

	kfree(sdesc);
}

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	u32 slave_id = *(u32 *)param;

	schan->dev_id = slave_id;
	return true;
}
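
/*
 * Probe: read the channel count from the new or deprecated dma-channels
 * property, get the "enable" clock plus the optional AGCP "ashb_eb" clock
 * and irq, register the dmaengine device, and expose the controller
 * through the OF DMA filter.
 */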
static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	u32 chn_count;
	int ret, i;

	/* Parse the new and deprecated dma-channels properties */
	ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count);
	if (ret)
		ret = device_property_read_u32(&pdev->dev, "#dma-channels",
					       &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	sdev = devm_kzalloc(&pdev->dev,
			    struct_size(sdev, channels, chn_count),
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* ashb clock is optional for AGCP DMA */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may run without an irq, which saves system
	 * power by not waking the system on DMA interrupts. Thus the
	 * interrupts property is optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	sdev->glb_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);

	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	sdev->dma_dev.chancnt = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
	sdev->dma_dev.device_config = sprd_dma_slave_config;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* get each channel's register base address */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;

	pm_runtime_get_sync(&pdev->dev);

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, sprd_dma_match);

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

	sprd_dma_disable(sdev);
	return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
	int ret;

	ret = sprd_dma_enable(sdev);
	if (ret)
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");

	return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <[email protected]>");
MODULE_AUTHOR("Eric Long <[email protected]>");
MODULE_ALIAS("platform:sprd-dma");