// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx ZynqMP DMA Engine
 *
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
/* Register Offsets */
#define ZYNQMP_DMA_ISR			0x100
#define ZYNQMP_DMA_IMR			0x104
#define ZYNQMP_DMA_IER			0x108
#define ZYNQMP_DMA_IDS			0x10C
#define ZYNQMP_DMA_CTRL0		0x110
#define ZYNQMP_DMA_CTRL1		0x114
#define ZYNQMP_DMA_DATA_ATTR		0x120
#define ZYNQMP_DMA_DSCR_ATTR		0x124
#define ZYNQMP_DMA_SRC_DSCR_WRD0	0x128
#define ZYNQMP_DMA_SRC_DSCR_WRD1	0x12C
#define ZYNQMP_DMA_SRC_DSCR_WRD2	0x130
#define ZYNQMP_DMA_SRC_DSCR_WRD3	0x134
#define ZYNQMP_DMA_DST_DSCR_WRD0	0x138
#define ZYNQMP_DMA_DST_DSCR_WRD1	0x13C
#define ZYNQMP_DMA_DST_DSCR_WRD2	0x140
#define ZYNQMP_DMA_DST_DSCR_WRD3	0x144
#define ZYNQMP_DMA_SRC_START_LSB	0x158
#define ZYNQMP_DMA_SRC_START_MSB	0x15C
#define ZYNQMP_DMA_DST_START_LSB	0x160
#define ZYNQMP_DMA_DST_START_MSB	0x164
#define ZYNQMP_DMA_TOTAL_BYTE		0x188
#define ZYNQMP_DMA_RATE_CTRL		0x18C
#define ZYNQMP_DMA_IRQ_SRC_ACCT		0x190
#define ZYNQMP_DMA_IRQ_DST_ACCT		0x194
#define ZYNQMP_DMA_CTRL2		0x200

/* Interrupt registers bit field definitions */
#define ZYNQMP_DMA_DONE			BIT(10)
#define ZYNQMP_DMA_AXI_WR_DATA		BIT(9)
#define ZYNQMP_DMA_AXI_RD_DATA		BIT(8)
#define ZYNQMP_DMA_AXI_RD_DST_DSCR	BIT(7)
#define ZYNQMP_DMA_AXI_RD_SRC_DSCR	BIT(6)
#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR	BIT(5)
#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR	BIT(4)
#define ZYNQMP_DMA_BYTE_CNT_OVRFL	BIT(3)
#define ZYNQMP_DMA_DST_DSCR_DONE	BIT(2)
#define ZYNQMP_DMA_INV_APB		BIT(0)

/* Control 0 register bit field definitions */
#define ZYNQMP_DMA_OVR_FETCH		BIT(7)
#define ZYNQMP_DMA_POINT_TYPE_SG	BIT(6)
#define ZYNQMP_DMA_RATE_CTRL_EN		BIT(3)

/* Control 1 register bit field definitions */
#define ZYNQMP_DMA_SRC_ISSUE		GENMASK(4, 0)

/* Data Attribute register bit field definitions */
#define ZYNQMP_DMA_ARBURST		GENMASK(27, 26)
#define ZYNQMP_DMA_ARCACHE		GENMASK(25, 22)
#define ZYNQMP_DMA_ARCACHE_OFST		22
#define ZYNQMP_DMA_ARQOS		GENMASK(21, 18)
#define ZYNQMP_DMA_ARQOS_OFST		18
#define ZYNQMP_DMA_ARLEN		GENMASK(17, 14)
#define ZYNQMP_DMA_ARLEN_OFST		14
#define ZYNQMP_DMA_AWBURST		GENMASK(13, 12)
#define ZYNQMP_DMA_AWCACHE		GENMASK(11, 8)
#define ZYNQMP_DMA_AWCACHE_OFST		8
#define ZYNQMP_DMA_AWQOS		GENMASK(7, 4)
#define ZYNQMP_DMA_AWQOS_OFST		4
#define ZYNQMP_DMA_AWLEN		GENMASK(3, 0)
#define ZYNQMP_DMA_AWLEN_OFST		0

/* Descriptor Attribute register bit field definitions */
#define ZYNQMP_DMA_AXCOHRNT		BIT(8)
#define ZYNQMP_DMA_AXCACHE		GENMASK(7, 4)
#define ZYNQMP_DMA_AXCACHE_OFST		4
#define ZYNQMP_DMA_AXQOS		GENMASK(3, 0)
#define ZYNQMP_DMA_AXQOS_OFST		0

/* Control register 2 bit field definitions */
#define ZYNQMP_DMA_ENABLE		BIT(0)

/* Buffer Descriptor definitions */
#define ZYNQMP_DMA_DESC_CTRL_STOP	0x10
#define ZYNQMP_DMA_DESC_CTRL_COMP_INT	0x4
#define ZYNQMP_DMA_DESC_CTRL_SIZE_256	0x2
#define ZYNQMP_DMA_DESC_CTRL_COHRNT	0x1

/* Interrupt Mask specific definitions */
#define ZYNQMP_DMA_INT_ERR	(ZYNQMP_DMA_AXI_RD_DATA | \
				ZYNQMP_DMA_AXI_WR_DATA | \
				ZYNQMP_DMA_AXI_RD_DST_DSCR | \
				ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
				ZYNQMP_DMA_INV_APB)
#define ZYNQMP_DMA_INT_OVRFL	(ZYNQMP_DMA_BYTE_CNT_OVRFL | \
				ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
				ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
#define ZYNQMP_DMA_INT_DONE	(ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK	(ZYNQMP_DMA_INT_DONE | \
					ZYNQMP_DMA_INT_ERR | \
					ZYNQMP_DMA_INT_OVRFL | \
					ZYNQMP_DMA_DST_DSCR_DONE)

/* Max number of descriptors per channel */
#define ZYNQMP_DMA_NUM_DESCS	32

/* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN	0x40000000

/* Max burst lengths */
#define ZYNQMP_DMA_MAX_DST_BURST_LEN	32768U
#define ZYNQMP_DMA_MAX_SRC_BURST_LEN	32768U

/* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL		0xF

#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL	0x1F

#define ZYNQMP_DMA_IDS_DEFAULT_MASK	0xFFF

/* Bus width in bits */
#define ZYNQMP_DMA_BUS_WIDTH_64		64
#define ZYNQMP_DMA_BUS_WIDTH_128	128

#define ZDMA_PM_TIMEOUT			100

#define ZYNQMP_DMA_DESC_SIZE(chan)	(chan->desc_size)

#define to_chan(chan)		container_of(chan, struct zynqmp_dma_chan, \
					     common)
#define tx_to_desc(tx)		container_of(tx, struct zynqmp_dma_desc_sw, \
					     async_tx)
/**
 * struct zynqmp_dma_desc_ll - Hw linked list descriptor
 * @addr: Buffer address
 * @size: Size of the buffer
 * @ctrl: Control word
 * @nxtdscraddr: Next descriptor base address
 * @rsvd: Reserved field, for Hw internal use
 */
struct zynqmp_dma_desc_ll {
	u64 addr;
	u32 size;
	u32 ctrl;
	u64 nxtdscraddr;
	u64 rsvd;
};
/**
 * struct zynqmp_dma_desc_sw - Per Transaction structure
 * @src: Source address for simple mode dma
 * @dst: Destination address for simple mode dma
 * @len: Transfer length for simple mode dma
 * @node: Node in the channel descriptor list
 * @tx_list: List head for the current transfer
 * @async_tx: Async transaction descriptor
 * @src_v: Virtual address of the src descriptor
 * @src_p: Physical address of the src descriptor
 * @dst_v: Virtual address of the dst descriptor
 * @dst_p: Physical address of the dst descriptor
 */
struct zynqmp_dma_desc_sw {
	u64 src;
	u64 dst;
	u32 len;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
	struct zynqmp_dma_desc_ll *src_v;
	dma_addr_t src_p;
	struct zynqmp_dma_desc_ll *dst_v;
	dma_addr_t dst_p;
};
/**
 * struct zynqmp_dma_chan - Driver specific DMA channel structure
 * @zdev: Driver specific device structure
 * @regs: Control registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @free_list: Descriptors free
 * @active_list: Descriptors active
 * @sw_desc_pool: SW descriptor pool
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool_v: Statically allocated descriptor base
 * @desc_pool_p: Physical allocated descriptor base
 * @desc_free_cnt: Descriptor available count
 * @dev: The dma device
 * @irq: Channel IRQ
 * @is_dmacoherent: Tells whether dma operations are coherent or not
 * @tasklet: Cleanup work after irq
 * @idle: Channel status
 * @desc_size: Size of the low level descriptor
 * @err: Channel has errors
 * @bus_width: Bus width
 * @src_burst_len: Source burst length
 * @dst_burst_len: Dest burst length
 */
struct zynqmp_dma_chan {
	struct zynqmp_dma_device *zdev;
	void __iomem *regs;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct zynqmp_dma_desc_sw *sw_desc_pool;
	struct list_head done_list;
	struct dma_chan common;
	void *desc_pool_v;
	dma_addr_t desc_pool_p;
	u32 desc_free_cnt;
	struct device *dev;
	int irq;
	bool is_dmacoherent;
	struct tasklet_struct tasklet;
	bool idle;
	size_t desc_size;
	bool err;
	u32 bus_width;
	u32 src_burst_len;
	u32 dst_burst_len;
};
/**
 * struct zynqmp_dma_device - DMA device structure
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @clk_main: Pointer to main clock
 * @clk_apb: Pointer to apb clock
 */
struct zynqmp_dma_device {
	struct device *dev;
	struct dma_device common;
	struct zynqmp_dma_chan *chan;
	struct clk *clk_main;
	struct clk *clk_apb;
};
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
				     u64 value)
{
	lo_hi_writeq(value, chan->regs + reg);
}
/**
 * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller
 * @chan: ZynqMP DMA channel pointer
 * @desc: Transaction descriptor pointer
 */
static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
					    struct zynqmp_dma_desc_sw *desc)
{
	dma_addr_t addr;

	addr = desc->src_p;
	zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
	addr = desc->dst_p;
	zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
}
/**
 * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
 * @chan: ZynqMP DMA channel pointer
 * @desc: Hw descriptor pointer
 */
static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
				       void *desc)
{
	struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;

	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
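	/* The paired dst HW descriptor sits immediately after the src one */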
	hw++;
	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
}
/**
 * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
 * @chan: ZynqMP DMA channel pointer
 * @sdesc: Hw descriptor pointer
 * @src: Source buffer address
 * @dst: Destination buffer address
 * @len: Transfer length
 * @prev: Previous hw descriptor pointer
 */
static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
					 struct zynqmp_dma_desc_ll *sdesc,
					 dma_addr_t src, dma_addr_t dst,
					 size_t len,
					 struct zynqmp_dma_desc_ll *prev)
{
	struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;

	sdesc->size = ddesc->size = len;
	sdesc->addr = src;
	ddesc->addr = dst;

	sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
	if (chan->is_dmacoherent) {
		sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
		ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
	}
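	/* Chain the previous src/dst HW descriptor pair to this new pair */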
	if (prev) {
		dma_addr_t addr = chan->desc_pool_p +
			    ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);
		ddesc = prev + 1;
		prev->nxtdscraddr = addr;
		ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
	}
}
/**
 * zynqmp_dma_init - Initialize the channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
{
	u32 val;

	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	val = readl(chan->regs + ZYNQMP_DMA_ISR);
	writel(val, chan->regs + ZYNQMP_DMA_ISR);

	if (chan->is_dmacoherent) {
		val = ZYNQMP_DMA_AXCOHRNT;
		val = (val & ~ZYNQMP_DMA_AXCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
		writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
	}

	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
	if (chan->is_dmacoherent) {
		val = (val & ~ZYNQMP_DMA_ARCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
		val = (val & ~ZYNQMP_DMA_AWCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
	}
	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);

	/* Clearing the interrupt account registers (they clear on read) */
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);

	chan->idle = true;
}
/**
 * zynqmp_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
	struct zynqmp_dma_desc_sw *desc, *new;
	dma_cookie_t cookie;
	unsigned long irqflags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&chan->lock, irqflags);
	cookie = dma_cookie_assign(tx);
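	/*
	 * Append to the hardware chain: clear the STOP bit on the current
	 * tail descriptors and point their next-descriptor fields here.
	 */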
	if (!list_empty(&chan->pending_list)) {
		desc = list_last_entry(&chan->pending_list,
				       struct zynqmp_dma_desc_sw, node);
		if (!list_empty(&desc->tx_list))
			desc = list_last_entry(&desc->tx_list,
					       struct zynqmp_dma_desc_sw, node);
		desc->src_v->nxtdscraddr = new->src_p;
		desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
		desc->dst_v->nxtdscraddr = new->dst_p;
		desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
	}

	list_add_tail(&new->node, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, irqflags);

	return cookie;
}
/**
 * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
 * @chan: ZynqMP DMA channel pointer
 *
 * Return: The sw descriptor
 */
static struct zynqmp_dma_desc_sw *
zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	desc = list_first_entry(&chan->free_list,
				struct zynqmp_dma_desc_sw, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&chan->lock, irqflags);

	INIT_LIST_HEAD(&desc->tx_list);
	/* Clear the src and dst descriptor memory */
	memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
	memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));

	return desc;
}
/**
 * zynqmp_dma_free_descriptor - Free a descriptor back to the free list
 * @chan: ZynqMP DMA channel pointer
 * @sdesc: Transaction descriptor pointer
 */
static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
				       struct zynqmp_dma_desc_sw *sdesc)
{
	struct zynqmp_dma_desc_sw *child, *next;

	chan->desc_free_cnt++;
	list_move_tail(&sdesc->node, &chan->free_list);
	list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
		chan->desc_free_cnt++;
		list_move_tail(&child->node, &chan->free_list);
	}
}
/**
 * zynqmp_dma_free_desc_list - Free descriptors list
 * @chan: ZynqMP DMA channel pointer
 * @list: List to parse and delete the descriptor
 */
static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
				      struct list_head *list)
{
	struct zynqmp_dma_desc_sw *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		zynqmp_dma_free_descriptor(chan, desc);
}
/**
 * zynqmp_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);
	struct zynqmp_dma_desc_sw *desc;
	int i, ret;

	ret = pm_runtime_resume_and_get(chan->dev);
	if (ret < 0)
		return ret;

	chan->sw_desc_pool = kcalloc(ZYNQMP_DMA_NUM_DESCS, sizeof(*desc),
				     GFP_KERNEL);
	if (!chan->sw_desc_pool)
		return -ENOMEM;

	chan->idle = true;
	chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;

	INIT_LIST_HEAD(&chan->free_list);

	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
		desc = chan->sw_desc_pool + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
		desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
		list_add_tail(&desc->node, &chan->free_list);
	}

	chan->desc_pool_v = dma_alloc_coherent(chan->dev,
					       (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
					       ZYNQMP_DMA_NUM_DESCS),
					       &chan->desc_pool_p, GFP_KERNEL);
	if (!chan->desc_pool_v)
		return -ENOMEM;
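	/* Carve the coherent pool into adjacent src/dst HW descriptor pairs */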
	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
		desc = chan->sw_desc_pool + i;
		desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
					(i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
		desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
		desc->src_p = chan->desc_pool_p +
				(i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
		desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
	}

	return ZYNQMP_DMA_NUM_DESCS;
}
/**
 * zynqmp_dma_start - Start DMA channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
{
	writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
	writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
	chan->idle = false;
	writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
}
/**
 * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
 * @chan: ZynqMP DMA channel pointer
 * @status: Interrupt status value
 */
static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
{
	if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
		writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
	if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
		readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
	if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
		readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
}
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{
	u32 val, burst_val;

	val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
	val |= ZYNQMP_DMA_POINT_TYPE_SG;
	writel(val, chan->regs + ZYNQMP_DMA_CTRL0);

	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
	burst_val = __ilog2_u32(chan->src_burst_len);
	val = (val & ~ZYNQMP_DMA_ARLEN) |
		((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
	burst_val = __ilog2_u32(chan->dst_burst_len);
	val = (val & ~ZYNQMP_DMA_AWLEN) |
		((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
}
/**
 * zynqmp_dma_device_config - ZynqMP DMA device configuration
 * @dchan: DMA channel
 * @config: DMA device config
 *
 * Return: 0 always
 */
static int zynqmp_dma_device_config(struct dma_chan *dchan,
				    struct dma_slave_config *config)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	chan->src_burst_len = clamp(config->src_maxburst, 1U,
		ZYNQMP_DMA_MAX_SRC_BURST_LEN);
	chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
		ZYNQMP_DMA_MAX_DST_BURST_LEN);

	return 0;
}
/**
 * zynqmp_dma_start_transfer - Initiate the new transfer
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;

	if (!chan->idle)
		return;

	zynqmp_dma_config(chan);

	desc = list_first_entry_or_null(&chan->pending_list,
					struct zynqmp_dma_desc_sw, node);
	if (!desc)
		return;

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	zynqmp_dma_update_desc_to_ctrlr(chan, desc);
	zynqmp_dma_start(chan);
}
/**
 * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
 * @chan: ZynqMP DMA channel
 */
static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc, *next;
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, irqflags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, irqflags);
		}

		/* Run any dependencies, then free the descriptor */
		zynqmp_dma_free_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, irqflags);
}
/**
 * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;

	desc = list_first_entry_or_null(&chan->active_list,
					struct zynqmp_dma_desc_sw, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &chan->done_list);
}
/**
 * zynqmp_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, irqflags);
}
/**
 * zynqmp_dma_free_descriptors - Free channel descriptors
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_free_desc_list(chan, &chan->active_list);
	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
	zynqmp_dma_free_desc_list(chan, &chan->done_list);
	spin_unlock_irqrestore(&chan->lock, irqflags);
}
/**
 * zynqmp_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	zynqmp_dma_free_descriptors(chan);
	dma_free_coherent(chan->dev,
		(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
		chan->desc_pool_v, chan->desc_pool_p);
	kfree(chan->sw_desc_pool);
	pm_runtime_mark_last_busy(chan->dev);
	pm_runtime_put_autosuspend(chan->dev);
}
/**
 * zynqmp_dma_reset - Reset the channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
{
	unsigned long irqflags;

	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_complete_descriptor(chan);
	spin_unlock_irqrestore(&chan->lock, irqflags);
	zynqmp_dma_chan_desc_cleanup(chan);
	zynqmp_dma_free_descriptors(chan);
	zynqmp_dma_init(chan);
}
/**
 * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the ZynqMP DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
{
	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
	u32 isr, imr, status;
	irqreturn_t ret = IRQ_NONE;

	isr = readl(chan->regs + ZYNQMP_DMA_ISR);
	imr = readl(chan->regs + ZYNQMP_DMA_IMR);
	status = isr & ~imr;
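	/* status now holds only the pending interrupts that are not masked */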
	writel(isr, chan->regs + ZYNQMP_DMA_ISR);

	if (status & ZYNQMP_DMA_INT_DONE) {
		tasklet_schedule(&chan->tasklet);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_DONE)
		chan->idle = true;

	if (status & ZYNQMP_DMA_INT_ERR) {
		chan->err = true;
		tasklet_schedule(&chan->tasklet);
		dev_err(chan->dev, "Channel %p has errors\n", chan);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_INT_OVRFL) {
		zynqmp_dma_handle_ovfl_int(chan, status);
		dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}
/**
 * zynqmp_dma_do_tasklet - Schedule completion tasklet
 * @t: Pointer to the ZynqMP DMA channel structure
 */
static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
{
	struct zynqmp_dma_chan *chan = from_tasklet(chan, t, tasklet);
	u32 count;
	unsigned long irqflags;

	if (chan->err) {
		zynqmp_dma_reset(chan);
		chan->err = false;
		return;
	}

	spin_lock_irqsave(&chan->lock, irqflags);
	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
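	/*
	 * The destination interrupt account register reports the number of
	 * completed descriptors and clears on read, so retire that many
	 * active descriptors.
	 */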
	while (count) {
		zynqmp_dma_complete_descriptor(chan);
		count--;
	}
	spin_unlock_irqrestore(&chan->lock, irqflags);

	zynqmp_dma_chan_desc_cleanup(chan);

	if (chan->idle) {
		spin_lock_irqsave(&chan->lock, irqflags);
		zynqmp_dma_start_transfer(chan);
		spin_unlock_irqrestore(&chan->lock, irqflags);
	}
}
/**
 * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
 * @dchan: DMA channel pointer
 *
 * Return: Always '0'
 */
static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	zynqmp_dma_free_descriptors(chan);

	return 0;
}
/**
 * zynqmp_dma_synchronize - Synchronize the termination of transfers to the current context.
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_synchronize(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	tasklet_kill(&chan->tasklet);
}
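/*
 * A minimal dmaengine client sketch for driving a memcpy through this
 * controller (illustrative only: the requesting device and channel name
 * are placeholders, and error handling is omitted):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "dma0");
 *	struct dma_async_tx_descriptor *tx =
 *		dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					  DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */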
/**
 * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
				struct dma_chan *dchan, dma_addr_t dma_dst,
				dma_addr_t dma_src, size_t len, ulong flags)
{
	struct zynqmp_dma_chan *chan;
	struct zynqmp_dma_desc_sw *new, *first = NULL;
	void *desc = NULL, *prev = NULL;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	chan = to_chan(dchan);

	desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&chan->lock, irqflags);
	if (desc_cnt > chan->desc_free_cnt) {
		spin_unlock_irqrestore(&chan->lock, irqflags);
		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
		return NULL;
	}
	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
	spin_unlock_irqrestore(&chan->lock, irqflags);
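	/* Build one src/dst descriptor pair per chunk of at most
	 * ZYNQMP_DMA_MAX_TRANS_LEN bytes.
	 */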
	do {
		/* Allocate and populate the descriptor */
		new = zynqmp_dma_get_descriptor(chan);

		copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
					     dma_dst, copy, prev);
		prev = desc;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	zynqmp_dma_desc_config_eod(chan, desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = (enum dma_ctrl_flags)flags;
	return &first->async_tx;
}
/**
 * zynqmp_dma_chan_remove - Channel remove function
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
{
	if (!chan)
		return;

	if (chan->irq)
		devm_free_irq(chan->zdev->dev, chan->irq, chan);
	tasklet_kill(&chan->tasklet);
	list_del(&chan->common.device_node);
}
/**
 * zynqmp_dma_chan_probe - Per Channel Probing
 * @zdev: Driver specific device structure
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
				 struct platform_device *pdev)
{
	struct zynqmp_dma_chan *chan;
	struct resource *res;
	struct device_node *node = pdev->dev.of_node;
	int err;

	chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = zdev->dev;
	chan->zdev = zdev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chan->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(chan->regs))
		return PTR_ERR(chan->regs);

	chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
	chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
	chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
	err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
	if (err < 0) {
		dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
		return err;
	}

	if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
	    chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
		dev_err(zdev->dev, "invalid bus-width value");
		return -EINVAL;
	}

	chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
	zdev->chan = chan;
	tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->free_list);

	dma_cookie_init(&chan->common);
	chan->common.device = &zdev->common;
	list_add_tail(&chan->common.device_node, &zdev->common.channels);

	zynqmp_dma_init(chan);
	chan->irq = platform_get_irq(pdev, 0);
	if (chan->irq < 0)
		return -ENXIO;
	err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
			       "zynqmp-dma", chan);
	if (err)
		return err;

	chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
	chan->idle = true;
	return 0;
}
/**
 * of_zynqmp_dma_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct zynqmp_dma_device *zdev = ofdma->of_dma_data;

	return dma_get_slave_channel(&zdev->chan->common);
}
/**
 * zynqmp_dma_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_suspend(dev);

	return 0;
}

/**
 * zynqmp_dma_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused zynqmp_dma_resume(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_resume(dev);

	return 0;
}
/**
 * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
{
	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);

	clk_disable_unprepare(zdev->clk_main);
	clk_disable_unprepare(zdev->clk_apb);

	return 0;
}
/**
 * zynqmp_dma_runtime_resume - Runtime resume method for the driver
 * @dev: Address of the device structure
 *
 * Bring the device back from low power mode by re-enabling its clocks.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
{
	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(zdev->clk_main);
	if (err) {
		dev_err(dev, "Unable to enable main clock.\n");
		return err;
	}

	err = clk_prepare_enable(zdev->clk_apb);
	if (err) {
		dev_err(dev, "Unable to enable apb clock.\n");
		clk_disable_unprepare(zdev->clk_main);
		return err;
	}

	return 0;
}
static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
	SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
			   zynqmp_dma_runtime_resume, NULL)
};
/**
 * zynqmp_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int zynqmp_dma_probe(struct platform_device *pdev)
{
	struct zynqmp_dma_device *zdev;
	struct dma_device *p;
	int ret;

	zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->dev = &pdev->dev;
	INIT_LIST_HEAD(&zdev->common.channels);

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
	dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);

	p = &zdev->common;
	p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
	p->device_terminate_all = zynqmp_dma_device_terminate_all;
	p->device_synchronize = zynqmp_dma_synchronize;
	p->device_issue_pending = zynqmp_dma_issue_pending;
	p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
	p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
	p->device_tx_status = dma_cookie_status;
	p->device_config = zynqmp_dma_device_config;
	p->dev = &pdev->dev;

	zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
	if (IS_ERR(zdev->clk_main))
		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_main),
				     "main clock not found.\n");

	zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
	if (IS_ERR(zdev->clk_apb))
		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_apb),
				     "apb clock not found.\n");

	platform_set_drvdata(pdev, zdev);
	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
	pm_runtime_use_autosuspend(zdev->dev);
	pm_runtime_enable(zdev->dev);
	ret = pm_runtime_resume_and_get(zdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "device wakeup failed.\n");
		pm_runtime_disable(zdev->dev);
	}
	if (!pm_runtime_enabled(zdev->dev)) {
		ret = zynqmp_dma_runtime_resume(zdev->dev);
		if (ret)
			return ret;
	}

	ret = zynqmp_dma_chan_probe(zdev, pdev);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "Probing channel failed\n");
		goto err_disable_pm;
	}

	p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
	p->src_addr_widths = BIT(zdev->chan->bus_width / 8);

	ret = dma_async_device_register(&zdev->common);
	if (ret) {
		dev_err(zdev->dev, "failed to register the dma device\n");
		goto free_chan_resources;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_zynqmp_dma_xlate, zdev);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&zdev->common);
		goto free_chan_resources;
	}

	pm_runtime_mark_last_busy(zdev->dev);
	pm_runtime_put_sync_autosuspend(zdev->dev);

	return 0;

free_chan_resources:
	zynqmp_dma_chan_remove(zdev->chan);
err_disable_pm:
	if (!pm_runtime_enabled(zdev->dev))
		zynqmp_dma_runtime_suspend(zdev->dev);
	pm_runtime_disable(zdev->dev);
	return ret;
}
/**
 * zynqmp_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int zynqmp_dma_remove(struct platform_device *pdev)
{
	struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&zdev->common);

	zynqmp_dma_chan_remove(zdev->chan);
	pm_runtime_disable(zdev->dev);
	if (!pm_runtime_enabled(zdev->dev))
		zynqmp_dma_runtime_suspend(zdev->dev);

	return 0;
}
static const struct of_device_id zynqmp_dma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);
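/*
 * Illustrative device-tree node for this driver. The unit address and
 * interrupt number below are placeholders; the property names are the ones
 * the driver actually consumes ("xlnx,bus-width", "dma-coherent", and the
 * "clk_main"/"clk_apb" clocks):
 *
 *	dma-controller@fd500000 {
 *		compatible = "xlnx,zynqmp-dma-1.0";
 *		reg = <0x0 0xfd500000 0x0 0x1000>;
 *		interrupts = <0 117 4>;
 *		clock-names = "clk_main", "clk_apb";
 *		xlnx,bus-width = <128>;
 *		dma-coherent;
 *	};
 */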
static struct platform_driver zynqmp_dma_driver = {
	.driver = {
		.name = "xilinx-zynqmp-dma",
		.of_match_table = zynqmp_dma_of_match,
		.pm = &zynqmp_dma_dev_pm_ops,
	},
	.probe = zynqmp_dma_probe,
	.remove = zynqmp_dma_remove,
};

module_platform_driver(zynqmp_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");