tegra186-gpc-dma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for NVIDIA Tegra GPC DMA controller.
 *
 * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <dt-bindings/memory/tegra186-mc.h>

#include "virt-dma.h"

/* CSR register */
#define TEGRA_GPCDMA_CHAN_CSR			0x00
#define TEGRA_GPCDMA_CSR_ENB			BIT(31)
#define TEGRA_GPCDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_GPCDMA_CSR_ONCE			BIT(27)

#define TEGRA_GPCDMA_CSR_FC_MODE		GENMASK(25, 24)
#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)

#define TEGRA_GPCDMA_CSR_DMA			GENMASK(23, 21)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC	\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)

#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK		GENMASK(20, 16)
#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED		\
		FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
#define TEGRA_GPCDMA_CSR_IRQ_MASK		BIT(15)
#define TEGRA_GPCDMA_CSR_WEIGHT			GENMASK(13, 10)

/* STATUS register */
#define TEGRA_GPCDMA_CHAN_STATUS		0x004
#define TEGRA_GPCDMA_STATUS_BUSY		BIT(31)
#define TEGRA_GPCDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_GPCDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY	BIT(27)
#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE	BIT(26)
#define TEGRA_GPCDMA_STATUS_CHANNEL_RX		BIT(25)
#define TEGRA_GPCDMA_STATUS_CHANNEL_TX		BIT(24)
#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA	BIT(23)
#define TEGRA_GPCDMA_STATUS_IRQ_STA		BIT(21)
#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA	BIT(20)

#define TEGRA_GPCDMA_CHAN_CSRE			0x008
#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE		BIT(31)

/* Source address */
#define TEGRA_GPCDMA_CHAN_SRC_PTR		0x00C

/* Destination address */
#define TEGRA_GPCDMA_CHAN_DST_PTR		0x010

/* High address pointer */
#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR		0x014
#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR		GENMASK(7, 0)
#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR		GENMASK(23, 16)

/* MC sequence register */
#define TEGRA_GPCDMA_CHAN_MCSEQ			0x18
#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP		BIT(31)
#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT		GENMASK(30, 25)
#define TEGRA_GPCDMA_MCSEQ_BURST		GENMASK(24, 23)
#define TEGRA_GPCDMA_MCSEQ_BURST_2		\
		FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
#define TEGRA_GPCDMA_MCSEQ_BURST_16		\
		FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
#define TEGRA_GPCDMA_MCSEQ_WRAP1		GENMASK(22, 20)
#define TEGRA_GPCDMA_MCSEQ_WRAP0		GENMASK(19, 17)
#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE		0

#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK	GENMASK(13, 7)
#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK	GENMASK(6, 0)

/* MMIO sequence register */
#define TEGRA_GPCDMA_CHAN_MMIOSEQ		0x01c
#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF		BIT(31)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH		GENMASK(30, 28)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32	\
		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP		BIT(27)
#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT	23
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN		2U
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX		32U
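/*
 * The burst field takes a ones-mask: for a burst of 2^n words the encoded
 * value is (2^n - 1), so 2 -> 0x1, 4 -> 0x3, ..., 32 -> 0x1f. Non-power-of-two
 * values are effectively rounded down to a power of two by fls() below.
 */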
#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs)	\
		(GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID		GENMASK(22, 19)
#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD		GENMASK(18, 16)
#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT		GENMASK(8, 7)

/* Channel WCOUNT */
#define TEGRA_GPCDMA_CHAN_WCOUNT		0x20

/* Transfer count */
#define TEGRA_GPCDMA_CHAN_XFER_COUNT		0x24

/* DMA byte count status */
#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS	0x28

/* Error Status Register */
#define TEGRA_GPCDMA_CHAN_ERR_STATUS		0x30
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT	8
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK		0xF
#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err)	(			\
		((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) &	\
		TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
#define TEGRA_DMA_BM_FIFO_FULL_ERR		0xF
#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR		0xE
#define TEGRA_DMA_PERIPH_ID_ERR			0xD
#define TEGRA_DMA_STREAM_ID_ERR			0xC
#define TEGRA_DMA_MC_SLAVE_ERR			0xB
#define TEGRA_DMA_MMIO_SLAVE_ERR		0xA

/* Fixed Pattern */
#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN		0x34

#define TEGRA_GPCDMA_CHAN_TZ			0x38
#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1	BIT(0)
#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1		BIT(1)

#define TEGRA_GPCDMA_CHAN_SPARE			0x3c
#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC	BIT(16)

/*
 * If a burst is in flight when the DMA is paused, this is the time needed
 * for the in-flight burst to complete and for the DMA status register to
 * update.
 */
#define TEGRA_GPCDMA_BURST_COMPLETE_TIME	10
#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT	5000 /* 5 msec */

/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET	0x20000

struct tegra_dma;
struct tegra_dma_channel;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @hw_support_pause: DMA HW engine supports pausing of the channel.
 * @terminate: Callback used to stop or pause an in-flight transfer.
 */
struct tegra_dma_chip_data {
	bool hw_support_pause;
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	int (*terminate)(struct tegra_dma_channel *tdc);
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	u32 csr;
	u32 src_ptr;
	u32 dst_ptr;
	u32 high_addr_ptr;
	u32 mc_seq;
	u32 mmio_seq;
	u32 wcount;
	u32 fixed_pattern;
};

/*
 * tegra_dma_sg_req: DMA request details to configure hardware. This
 * contains the details for one transfer to configure DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per requester details and hw support. These sub-transfers
 * are added as an array in the Tegra DMA desc which manages the transfer
 * details.
 */
struct tegra_dma_sg_req {
	unsigned int len;
	struct tegra_dma_channel_regs ch_regs;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which uses virt_dma_desc to
 * manage client requests and keep track of transfer status, callbacks,
 * request counts, etc.
 */
struct tegra_dma_desc {
	bool cyclic;
	unsigned int bytes_req;
	unsigned int bytes_xfer;
	unsigned int sg_idx;
	unsigned int sg_count;
	struct virt_dma_desc vd;
	struct tegra_dma_channel *tdc;
	struct tegra_dma_sg_req sg_req[];
};

/*
 * tegra_dma_channel: Channel specific information
 */
struct tegra_dma_channel {
	bool config_init;
	char name[30];
	enum dma_transfer_direction sid_dir;
	int id;
	int irq;
	int slave_id;
	struct tegra_dma *tdma;
	struct virt_dma_chan vc;
	struct tegra_dma_desc *dma_desc;
	struct dma_slave_config dma_sconfig;
	unsigned int stream_id;
	unsigned long chan_base_offset;
};

/*
 * tegra_dma: Tegra DMA specific information
 */
struct tegra_dma {
	const struct tegra_dma_chip_data *chip_data;
	unsigned long sid_m2d_reserved;
	unsigned long sid_d2m_reserved;
	void __iomem *base_addr;
	struct device *dev;
	struct dma_device dma_dev;
	struct reset_control *rst;
	struct tegra_dma_channel channels[];
};
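
/*
 * Per-channel register accessors: each channel's register block lives at
 * base_addr + chan_base_offset, so accesses take channel-relative register
 * offsets.
 */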
static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, vc.chan);
}

static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct tegra_dma_desc, vd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return tdc->vc.chan.device->dev;
}

static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
{
	dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
		tdc->id, tdc->name);
	dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR)
	);
	dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS)
	);
	dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
		tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
}
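
/*
 * A slave (requester) ID can drive at most one channel per direction at a
 * time; the per-direction reservation bitmaps enforce that exclusivity.
 */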
static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
				 enum dma_transfer_direction direction)
{
	struct tegra_dma *tdma = tdc->tdma;
	int sid = tdc->slave_id;

	if (!is_slave_direction(direction))
		return 0;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
			dev_err(tdma->dev, "slave id already in use\n");
			return -EINVAL;
		}
		break;
	case DMA_DEV_TO_MEM:
		if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
			dev_err(tdma->dev, "slave id already in use\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	tdc->sid_dir = direction;

	return 0;
}

static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;
	int sid = tdc->slave_id;

	switch (tdc->sid_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(sid, &tdma->sid_m2d_reserved);
		break;
	case DMA_DEV_TO_MEM:
		clear_bit(sid, &tdma->sid_d2m_reserved);
		break;
	default:
		break;
	}

	tdc->sid_dir = DMA_TRANS_NONE;
}

static void tegra_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_dma_desc, vd));
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;

	return 0;
}

static int tegra_dma_pause(struct tegra_dma_channel *tdc)
{
	int ret;
	u32 val;

	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);

	/* Wait until busy bit is de-asserted */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
			val,
			!(val & TEGRA_GPCDMA_STATUS_BUSY),
			TEGRA_GPCDMA_BURST_COMPLETE_TIME,
			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);

	if (ret) {
		dev_err(tdc2dev(tdc), "DMA pause timed out\n");
		tegra_dma_dump_chan_regs(tdc);
	}

	return ret;
}

static int tegra_dma_device_pause(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	int ret;

	if (!tdc->tdma->chip_data->hw_support_pause)
		return -ENOSYS;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	ret = tegra_dma_pause(tdc);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return ret;
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	u32 val;

	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
}

static int tegra_dma_device_resume(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	if (!tdc->tdma->chip_data->hw_support_pause)
		return -ENOSYS;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	tegra_dma_resume(tdc);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return 0;
}

static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
{
	/*
	 * Return 0 irrespective of PAUSE status.
	 * This is useful to recover channels that can exit out of flush
	 * state when the channel is disabled.
	 */
	tegra_dma_pause(tdc);

	return 0;
}

static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
	u32 csr, status;

	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);

	/* Disable interrupts */
	csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;

	/* Disable DMA */
	csr &= ~TEGRA_GPCDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
	}
}
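
/*
 * For cyclic transfers the transfer registers behave as effectively
 * double-buffered: once the channel reports busy on the current period, the
 * next period's pointers and word count can be written and the channel
 * re-enabled so consecutive periods run with the right parameters.
 */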
static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_channel_regs *ch_regs;
	int ret;
	u32 val;

	dma_desc->sg_idx++;

	/* Reset the sg index for cyclic transfers */
	if (dma_desc->sg_idx == dma_desc->sg_count)
		dma_desc->sg_idx = 0;

	/* Configure next transfer immediately after DMA is busy */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
			val,
			(val & TEGRA_GPCDMA_STATUS_BUSY), 0,
			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret)
		return;

	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}

static void tegra_dma_start(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_channel_regs *ch_regs;
	struct virt_dma_desc *vdesc;

	if (!dma_desc) {
		vdesc = vchan_next_desc(&tdc->vc);
		if (!vdesc)
			return;

		dma_desc = vd_to_tegra_dma_desc(vdesc);
		list_del(&vdesc->node);
		dma_desc->tdc = tdc;
		tdc->dma_desc = dma_desc;
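		/* Clear any pause left behind by a previous pause/terminate */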
		tegra_dma_resume(tdc);
	}

	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}

static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
{
	vchan_cookie_complete(&tdc->dma_desc->vd);

	tegra_dma_sid_free(tdc);
	tdc->dma_desc = NULL;
}

static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
					unsigned int err_status)
{
	switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
	case TEGRA_DMA_BM_FIFO_FULL_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d bm fifo full\n", tdc->id);
		break;

	case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d peripheral fifo full\n", tdc->id);
		break;

	case TEGRA_DMA_PERIPH_ID_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d illegal peripheral id\n", tdc->id);
		break;

	case TEGRA_DMA_STREAM_ID_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d illegal stream id\n", tdc->id);
		break;

	case TEGRA_DMA_MC_SLAVE_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d mc slave error\n", tdc->id);
		break;

	case TEGRA_DMA_MMIO_SLAVE_ERR:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d mmio slave error\n", tdc->id);
		break;

	default:
		dev_err(tdc->tdma->dev,
			"GPCDMA CH%d security violation %x\n", tdc->id,
			err_status);
	}
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_sg_req *sg_req;
	u32 status;

	/* Check channel error status register */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
	if (status) {
		tegra_dma_chan_decode_error(tdc, status);
		tegra_dma_dump_chan_regs(tdc);
		tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
	}

	spin_lock(&tdc->vc.lock);
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
		goto irq_done;

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
		  TEGRA_GPCDMA_STATUS_ISE_EOC);

	if (!dma_desc)
		goto irq_done;

	sg_req = dma_desc->sg_req;
	dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;

	if (dma_desc->cyclic) {
		vchan_cyclic_callback(&dma_desc->vd);
		tegra_dma_configure_next_sg(tdc);
	} else {
		dma_desc->sg_idx++;
		if (dma_desc->sg_idx == dma_desc->sg_count)
			tegra_dma_xfer_complete(tdc);
		else
			tegra_dma_start(tdc);
	}

irq_done:
	spin_unlock(&tdc->vc.lock);

	return IRQ_HANDLED;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	if (tdc->dma_desc)
		return;

	spin_lock_irqsave(&tdc->vc.lock, flags);
	if (vchan_issue_pending(&tdc->vc))
		tegra_dma_start(tdc);

	/*
	 * For cyclic DMA transfers, program the second
	 * transfer parameters as soon as the first DMA
	 * transfer is started in order for the DMA
	 * controller to trigger the second transfer
	 * with the correct parameters.
	 */
	if (tdc->dma_desc && tdc->dma_desc->cyclic)
		tegra_dma_configure_next_sg(tdc);

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}

static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
{
	int ret;
	u32 status, csr;

	/*
	 * Change the client associated with the DMA channel
	 * to stop the DMA engine from starting any more bursts for
	 * the given client and wait for in-flight bursts to complete.
	 */
	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
	csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
	csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);

	/* Wait for in-flight data transfer to finish */
	udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);

	/* If the TX/RX path is still active, wait until it becomes inactive */
	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
				tdc->chan_base_offset +
				TEGRA_GPCDMA_CHAN_STATUS,
				status,
				!(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
				TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
				5,
				TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
	if (ret) {
		dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
		tegra_dma_dump_chan_regs(tdc);
	}

	return ret;
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);
	int err;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (tdc->dma_desc) {
		err = tdc->tdma->chip_data->terminate(tdc);
		if (err) {
			spin_unlock_irqrestore(&tdc->vc.lock, flags);
			return err;
		}

		vchan_terminate_vdesc(&tdc->dma_desc->vd);
		tegra_dma_disable(tdc);
		tdc->dma_desc = NULL;
	}

	tegra_dma_sid_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	vchan_dma_desc_free_list(&tdc->vc, &head);

	return 0;
}

static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
	struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
	unsigned int bytes_xfer, residual;
	u32 wcount = 0, status;

	wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);

	/*
	 * Set wcount = 0 if EOC bit is set. The transfer would have
	 * already completed and the CHAN_XFER_COUNT could have updated
	 * for the next transfer, specifically in case of cyclic transfers.
	 */
	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
		wcount = 0;
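
	/*
	 * XFER_COUNT holds the number of 4-byte words still outstanding for
	 * the current sg entry, so this entry has transferred
	 * (len - wcount * 4) bytes so far.
	 */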
	bytes_xfer = dma_desc->bytes_xfer +
		     sg_req[dma_desc->sg_idx].len - (wcount * 4);

	residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);

	return residual;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct virt_dma_desc *vd;
	unsigned int residual;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
		dma_desc = vd_to_tegra_dma_desc(vd);
		residual = dma_desc->bytes_req;
		dma_set_residue(txstate, residual);
	} else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
		residual = tegra_dma_get_residual(tdc);
		dma_set_residue(txstate, residual);
	} else {
		dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
				enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
	default:
		dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
		return -EINVAL;
	}
}

static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
				   u32 burst_size, enum dma_slave_buswidth slave_bw,
				   int len)
{
	unsigned int burst_mmio_width, burst_byte;

	/*
	 * burst_size from the client is in terms of the bus_width.
	 * Convert that into words.
	 * If burst_size is not specified by the client, then use
	 * len to calculate the optimum burst size.
	 */
	burst_byte = burst_size ? burst_size * slave_bw : len;
	burst_mmio_width = burst_byte / 4;

	if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
		return 0;

	burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);

	return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
}

static int get_transfer_param(struct tegra_dma_channel *tdc,
			      enum dma_transfer_direction direction,
			      u32 *apb_addr,
			      u32 *mmio_seq,
			      u32 *csr,
			      unsigned int *burst_size,
			      enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
		return 0;
	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
		return 0;
	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
	}

	return -EINVAL;
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
			  size_t len, unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	u32 csr, mc_seq;

	if ((len & 3) || (dest & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc),
			"DMA length/memory address is not supported\n");
		return NULL;
	}

	/* Set DMA mode to fixed pattern */
	csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program outstanding MC requests */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Set burst size */
	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = len;
	dma_desc->sg_count = 1;
	sg_req = dma_desc->sg_req;

	sg_req[0].ch_regs.src_ptr = 0;
	sg_req[0].ch_regs.dst_ptr = dest;
	sg_req[0].ch_regs.high_addr_ptr =
			FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
	sg_req[0].ch_regs.fixed_pattern = value;
	/* Word count reg takes value as (N + 1) words */
	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
	sg_req[0].ch_regs.csr = csr;
	sg_req[0].ch_regs.mmio_seq = 0;
	sg_req[0].ch_regs.mc_seq = mc_seq;
	sg_req[0].len = len;

	dma_desc->cyclic = false;

	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	unsigned int max_dma_count;
	u32 csr, mc_seq;

	max_dma_count = tdc->tdma->chip_data->max_dma_count;
	if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc),
			"DMA length/memory address is not supported\n");
		return NULL;
	}

	/* Set DMA mode to memory to memory transfer */
	csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
		  (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

	/* Set the address wrapping */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program outstanding MC requests */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
	/* Set burst size */
	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = len;
	dma_desc->sg_count = 1;
	sg_req = dma_desc->sg_req;

	sg_req[0].ch_regs.src_ptr = src;
	sg_req[0].ch_regs.dst_ptr = dest;
	sg_req[0].ch_regs.high_addr_ptr =
		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
	sg_req[0].ch_regs.high_addr_ptr |=
		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
	/* Word count reg takes value as (N + 1) words */
	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
	sg_req[0].ch_regs.csr = csr;
	sg_req[0].ch_regs.mmio_seq = 0;
	sg_req[0].ch_regs.mc_seq = mc_seq;
	sg_req[0].len = len;

	dma_desc->cyclic = false;

	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
	struct tegra_dma_sg_req *sg_req;
	struct tegra_dma_desc *dma_desc;
	struct scatterlist *sg;
	u32 burst_size;
	unsigned int i;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = tegra_dma_sid_reserve(tdc, direction);
	if (ret)
		return NULL;

	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
				 &burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	/* Enable once or continuous mode */
	csr |= TEGRA_GPCDMA_CSR_ONCE;
	/* Program the slave id in requestor select */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping on both MC and MMIO side */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

	/* Program 2 MC outstanding requests by default. */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
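	/*
	 * The REQ_COUNT field appears to be (N + 1) encoded: a value of 1
	 * corresponds to the two outstanding requests mentioned above.
	 */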

	/* Setting MC burst size depending on MMIO burst size */
	if (burst_size == 64)
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
	else
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

	dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->sg_count = sg_len;
	sg_req = dma_desc->sg_req;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len;
		dma_addr_t mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) || len > max_dma_count) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			kfree(dma_desc);
			return NULL;
		}

		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_req += len;

		if (direction == DMA_MEM_TO_DEV) {
			sg_req[i].ch_regs.src_ptr = mem;
			sg_req[i].ch_regs.dst_ptr = apb_ptr;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
		} else if (direction == DMA_DEV_TO_MEM) {
			sg_req[i].ch_regs.src_ptr = apb_ptr;
			sg_req[i].ch_regs.dst_ptr = mem;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
		}

		/*
		 * Word count register takes input in words. Writing a value
		 * of N into word count register means a req of (N+1) words.
		 */
		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
		sg_req[i].ch_regs.csr = csr;
		sg_req[i].ch_regs.mmio_seq = mmio_seq;
		sg_req[i].ch_regs.mc_seq = mc_seq;
		sg_req[i].len = len;
	}

	dma_desc->cyclic = false;

	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
			  size_t period_len, enum dma_transfer_direction direction,
			  unsigned long flags)
{
	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
	unsigned int max_dma_count, len, period_count, i;
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	dma_addr_t mem = buf_addr;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	ret = tegra_dma_sid_reserve(tdc, direction);
	if (ret)
		return NULL;

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	max_dma_count = tdc->tdma->chip_data->max_dma_count;
	if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
				 &burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	/* Enable once or continuous mode */
	csr &= ~TEGRA_GPCDMA_CSR_ONCE;
	/* Program the slave id in requestor select */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
	/* Enable IRQ mask */
	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
	/* Configure default priority weight for the channel */
	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
	/* Enable the DMA interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_GPCDMA_CSR_IE_EOC;

	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);

	mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
	/* retain stream-id and clean rest */
	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;

	/* Set the address wrapping on both MC and MMIO side */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);

	/* Program 2 MC outstanding requests by default. */
	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);

	/* Setting MC burst size depending on MMIO burst size */
	if (burst_size == 64)
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
	else
		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;

	period_count = buf_len / period_len;
	dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
			   GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_desc->bytes_req = buf_len;
	dma_desc->sg_count = period_count;
	sg_req = dma_desc->sg_req;

	/* Split transfer equal to period size */
	for (i = 0; i < period_count; i++) {
		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);

		if (direction == DMA_MEM_TO_DEV) {
			sg_req[i].ch_regs.src_ptr = mem;
			sg_req[i].ch_regs.dst_ptr = apb_ptr;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
		} else if (direction == DMA_DEV_TO_MEM) {
			sg_req[i].ch_regs.src_ptr = apb_ptr;
			sg_req[i].ch_regs.dst_ptr = mem;
			sg_req[i].ch_regs.high_addr_ptr =
				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
		}

		/*
		 * Word count register takes input in words. Writing a value
		 * of N into word count register means a req of (N+1) words.
		 */
		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
		sg_req[i].ch_regs.csr = csr;
		sg_req[i].ch_regs.mmio_seq = mmio_seq;
		sg_req[i].ch_regs.mc_seq = mc_seq;
		sg_req[i].len = len;

		mem += len;
	}

	dma_desc->cyclic = true;

	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);
	tdc->config_init = false;

	return 0;
}

static void tegra_dma_chan_synchronize(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	synchronize_irq(tdc->irq);
	vchan_synchronize(&tdc->vc);
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	synchronize_irq(tdc->irq);

	tasklet_kill(&tdc->vc.task);
	tdc->config_init = false;
	tdc->slave_id = -1;
	tdc->sid_dir = DMA_TRANS_NONE;
	free_irq(tdc->irq, tdc);

	vchan_free_chan_resources(&tdc->vc);
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct tegra_dma_channel *tdc;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = false,
	.terminate = tegra_dma_stop_client,
};

static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = true,
	.terminate = tegra_dma_pause,
};

static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
	.nr_channels = 31,
	.channel_reg_size = SZ_64K,
	.max_dma_count = SZ_1G,
	.hw_support_pause = true,
	.terminate = tegra_dma_pause_noerr,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra186-gpcdma",
		.data = &tegra186_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra194-gpcdma",
		.data = &tegra194_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra234-gpcdma",
		.data = &tegra234_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
{
	unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);

	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);

	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);

	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);

	return 0;
}

static int tegra_dma_probe(struct platform_device *pdev)
{
	const struct tegra_dma_chip_data *cdata = NULL;
	struct iommu_fwspec *iommu_spec;
	unsigned int stream_id, i;
	struct tegra_dma *tdma;
	int ret;

	cdata = of_device_get_match_data(&pdev->dev);

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
	if (IS_ERR(tdma->rst)) {
		return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
				     "Missing controller reset\n");
	}
	reset_control_reset(tdma->rst);

	tdma->dma_dev.dev = &pdev->dev;

	iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
	if (!iommu_spec) {
		dev_err(&pdev->dev, "Missing iommu stream-id\n");
		return -EINVAL;
	}
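
	/*
	 * The lower 16 bits of the first iommus specifier carry the SMMU
	 * stream ID, which is programmed into every channel's MCSEQ below.
	 */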
	stream_id = iommu_spec->ids[0] & 0xffff;

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->irq = platform_get_irq(pdev, i);
		if (tdc->irq < 0)
			return tdc->irq;

		tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET +
					i * cdata->channel_reg_size;
		snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = -1;

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_dma_desc_free;

		/* program stream-id for this channel */
		tegra_dma_program_sid(tdc, stream_id);
		tdc->stream_id = stream_id;
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	/*
	 * Only word aligned transfers are supported. Set the copy
	 * alignment shift.
	 */
	tdma->dma_dev.copy_align = 2;
	tdma->dma_dev.fill_align = 2;
	tdma->dma_dev.device_alloc_chan_resources =
			tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
			tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
	tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
	tdma->dma_dev.device_pause = tegra_dma_device_pause;
	tdma->dma_dev.device_resume = tegra_dma_device_resume;
	tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret,
			      "GPC DMA driver registration failed\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret,
			      "GPC DMA OF registration failed\n");

		dma_async_device_unregister(&tdma->dma_dev);
		return ret;
	}

	dev_info(&pdev->dev, "GPC DMA driver registered %d channels\n",
		 cdata->nr_channels);

	return 0;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);

	return 0;
}

static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned int i;

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		if (tdc->dma_desc) {
			dev_err(tdma->dev, "channel %u busy\n", i);
			return -EBUSY;
		}
	}

	return 0;
}

static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned int i;

	reset_control_reset(tdma->rst);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tegra_dma_program_sid(tdc, tdc->stream_id);
	}

	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};

static struct platform_driver tegra_dma_driver = {
	.driver = {
		.name	= "tegra-gpcdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dma_driver);

MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
MODULE_AUTHOR("Pavan Kunapuli <[email protected]>");
MODULE_AUTHOR("Rajesh Gumasta <[email protected]>");
MODULE_LICENSE("GPL");