/* usb-dmac.c — Renesas USB DMA Controller driver (source listing, ~23 KB) */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Renesas USB DMA Controller Driver
  4. *
  5. * Copyright (C) 2015 Renesas Electronics Corporation
  6. *
  7. * based on rcar-dmac.c
  8. * Copyright (C) 2014 Renesas Electronics Inc.
  9. * Author: Laurent Pinchart <[email protected]>
  10. */
  11. #include <linux/delay.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/dmaengine.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/list.h>
  16. #include <linux/module.h>
  17. #include <linux/of.h>
  18. #include <linux/of_dma.h>
  19. #include <linux/of_platform.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/pm_runtime.h>
  22. #include <linux/slab.h>
  23. #include <linux/spinlock.h>
  24. #include "../dmaengine.h"
  25. #include "../virt-dma.h"
/*
 * struct usb_dmac_sg - Descriptor for a hardware transfer
 * @mem_addr: memory address
 * @size: transfer size in bytes
 */
struct usb_dmac_sg {
	dma_addr_t mem_addr;	/* memory-side bus address of this chunk */
	u32 size;		/* chunk length in bytes */
};
/*
 * struct usb_dmac_desc - USB DMA Transfer Descriptor
 * @vd: base virtual channel DMA transaction descriptor
 * @direction: direction of the DMA transfer
 * @sg_allocated_len: length of allocated sg
 * @sg_len: length of sg
 * @sg_index: index of sg
 * @residue: residue after the DMAC completed a transfer
 * @node: node for desc_got and desc_freed
 * @done_cookie: cookie after the DMAC completed a transfer
 * @sg: information for the transfer
 */
struct usb_dmac_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction direction;
	unsigned int sg_allocated_len;	/* capacity of sg[] (may exceed sg_len) */
	unsigned int sg_len;		/* number of valid sg[] entries */
	unsigned int sg_index;		/* sg[] entry currently in flight */
	u32 residue;			/* bytes left, recorded at completion */
	struct list_head node;		/* links into chan->desc_got/desc_freed */
	dma_cookie_t done_cookie;	/* cookie saved when the transfer ended */
	struct usb_dmac_sg sg[];	/* flexible array of transfer chunks */
};

#define to_usb_dmac_desc(vd)	container_of(vd, struct usb_dmac_desc, vd)
/*
 * struct usb_dmac_chan - USB DMA Controller Channel
 * @vc: base virtual DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: irq number of this channel
 * @desc: the current descriptor
 * @descs_allocated: number of descriptors allocated
 * @desc_got: got descriptors
 * @desc_freed: freed descriptors after the DMAC completed a transfer
 */
struct usb_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *iomem;		/* dmac->iomem + USB_DMAC_CHAN_OFFSET(index) */
	unsigned int index;
	int irq;
	struct usb_dmac_desc *desc;	/* in-flight descriptor, NULL when idle */
	int descs_allocated;		/* size of the pre-allocated pool */
	struct list_head desc_got;	/* descriptors handed out to clients */
	struct list_head desc_freed;	/* reusable / completed descriptors */
};

#define to_usb_dmac_chan(c)	container_of(c, struct usb_dmac_chan, vc.chan)
/*
 * struct usb_dmac - USB DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 */
struct usb_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;		/* controller register block */
	unsigned int n_channels;	/* from the "dma-channels" DT property */
	struct usb_dmac_chan *channels;	/* array of n_channels entries */
};

#define to_usb_dmac(d)	container_of(d, struct usb_dmac, engine)
/* -----------------------------------------------------------------------------
 * Registers
 */

#define USB_DMAC_CHAN_OFFSET(i)	(0x20 + 0x20 * (i))	/* per-channel register stride */

#define USB_DMASWR		0x0008		/* software reset */
#define USB_DMASWR_SWR		(1 << 0)
#define USB_DMAOR		0x0060		/* operation register */
#define USB_DMAOR_AE		(1 << 1)	/* address error flag (checked in usb_dmac_init) */
#define USB_DMAOR_DME		(1 << 0)	/* global DMA enable */

/* Per-channel registers (relative to USB_DMAC_CHAN_OFFSET) */
#define USB_DMASAR		0x0000		/* source address */
#define USB_DMADAR		0x0004		/* destination address */
#define USB_DMATCR		0x0008		/* transfer count, in USB_DMAC_XFER_SIZE units */
#define USB_DMATCR_MASK		0x00ffffff
#define USB_DMACHCR		0x0014		/* channel control */
#define USB_DMACHCR_FTE		(1 << 24)	/* set by the ISR after a NULL interrupt; TE follows */
#define USB_DMACHCR_NULLE	(1 << 16)
#define USB_DMACHCR_NULL	(1 << 12)
#define USB_DMACHCR_TS_8B	((0 << 7) | (0 << 6))
#define USB_DMACHCR_TS_16B	((0 << 7) | (1 << 6))
#define USB_DMACHCR_TS_32B	((1 << 7) | (0 << 6))
#define USB_DMACHCR_IE		(1 << 5)	/* interrupt enable */
#define USB_DMACHCR_SP		(1 << 2)
#define USB_DMACHCR_TE		(1 << 1)	/* transfer end flag */
#define USB_DMACHCR_DE		(1 << 0)	/* channel enable */
#define USB_DMATEND		0x0018		/* final transaction valid-data mask */

/* Hardcode the xfer_shift to 5 (32bytes) */
#define USB_DMAC_XFER_SHIFT	5
#define USB_DMAC_XFER_SIZE	(1 << USB_DMAC_XFER_SHIFT)
#define USB_DMAC_CHCR_TS	USB_DMACHCR_TS_32B
#define USB_DMAC_SLAVE_BUSWIDTH	DMA_SLAVE_BUSWIDTH_32_BYTES

/* for descriptors */
#define USB_DMAC_INITIAL_NR_DESC	16
#define USB_DMAC_INITIAL_NR_SG		8
  130. /* -----------------------------------------------------------------------------
  131. * Device access
  132. */
  133. static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data)
  134. {
  135. writel(data, dmac->iomem + reg);
  136. }
  137. static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg)
  138. {
  139. return readl(dmac->iomem + reg);
  140. }
  141. static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg)
  142. {
  143. return readl(chan->iomem + reg);
  144. }
  145. static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data)
  146. {
  147. writel(data, chan->iomem + reg);
  148. }
  149. /* -----------------------------------------------------------------------------
  150. * Initialization and configuration
  151. */
  152. static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan)
  153. {
  154. u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
  155. return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE;
  156. }
  157. static u32 usb_dmac_calc_tend(u32 size)
  158. {
  159. /*
  160. * Please refer to the Figure "Example of Final Transaction Valid
  161. * Data Transfer Enable (EDTEN) Setting" in the data sheet.
  162. */
  163. return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? :
  164. USB_DMAC_XFER_SIZE));
  165. }
  166. /* This function is already held by vc.lock */
  167. static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan,
  168. unsigned int index)
  169. {
  170. struct usb_dmac_desc *desc = chan->desc;
  171. struct usb_dmac_sg *sg = desc->sg + index;
  172. dma_addr_t src_addr = 0, dst_addr = 0;
  173. WARN_ON_ONCE(usb_dmac_chan_is_busy(chan));
  174. if (desc->direction == DMA_DEV_TO_MEM)
  175. dst_addr = sg->mem_addr;
  176. else
  177. src_addr = sg->mem_addr;
  178. dev_dbg(chan->vc.chan.device->dev,
  179. "chan%u: queue sg %p: %u@%pad -> %pad\n",
  180. chan->index, sg, sg->size, &src_addr, &dst_addr);
  181. usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff);
  182. usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff);
  183. usb_dmac_chan_write(chan, USB_DMATCR,
  184. DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE));
  185. usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size));
  186. usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS |
  187. USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE);
  188. }
  189. /* This function is already held by vc.lock */
  190. static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan)
  191. {
  192. struct virt_dma_desc *vd;
  193. vd = vchan_next_desc(&chan->vc);
  194. if (!vd) {
  195. chan->desc = NULL;
  196. return;
  197. }
  198. /*
  199. * Remove this request from vc->desc_issued. Otherwise, this driver
  200. * will get the previous value from vchan_next_desc() after a transfer
  201. * was completed.
  202. */
  203. list_del(&vd->node);
  204. chan->desc = to_usb_dmac_desc(vd);
  205. chan->desc->sg_index = 0;
  206. usb_dmac_chan_start_sg(chan, 0);
  207. }
  208. static int usb_dmac_init(struct usb_dmac *dmac)
  209. {
  210. u16 dmaor;
  211. /* Clear all channels and enable the DMAC globally. */
  212. usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
  213. dmaor = usb_dmac_read(dmac, USB_DMAOR);
  214. if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) {
  215. dev_warn(dmac->dev, "DMAOR initialization failed.\n");
  216. return -EIO;
  217. }
  218. return 0;
  219. }
  220. /* -----------------------------------------------------------------------------
  221. * Descriptors allocation and free
  222. */
  223. static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
  224. gfp_t gfp)
  225. {
  226. struct usb_dmac_desc *desc;
  227. unsigned long flags;
  228. desc = kzalloc(struct_size(desc, sg, sg_len), gfp);
  229. if (!desc)
  230. return -ENOMEM;
  231. desc->sg_allocated_len = sg_len;
  232. INIT_LIST_HEAD(&desc->node);
  233. spin_lock_irqsave(&chan->vc.lock, flags);
  234. list_add_tail(&desc->node, &chan->desc_freed);
  235. spin_unlock_irqrestore(&chan->vc.lock, flags);
  236. return 0;
  237. }
  238. static void usb_dmac_desc_free(struct usb_dmac_chan *chan)
  239. {
  240. struct usb_dmac_desc *desc, *_desc;
  241. LIST_HEAD(list);
  242. list_splice_init(&chan->desc_freed, &list);
  243. list_splice_init(&chan->desc_got, &list);
  244. list_for_each_entry_safe(desc, _desc, &list, node) {
  245. list_del(&desc->node);
  246. kfree(desc);
  247. }
  248. chan->descs_allocated = 0;
  249. }
  250. static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
  251. unsigned int sg_len, gfp_t gfp)
  252. {
  253. struct usb_dmac_desc *desc = NULL;
  254. unsigned long flags;
  255. /* Get a freed descritpor */
  256. spin_lock_irqsave(&chan->vc.lock, flags);
  257. list_for_each_entry(desc, &chan->desc_freed, node) {
  258. if (sg_len <= desc->sg_allocated_len) {
  259. list_move_tail(&desc->node, &chan->desc_got);
  260. spin_unlock_irqrestore(&chan->vc.lock, flags);
  261. return desc;
  262. }
  263. }
  264. spin_unlock_irqrestore(&chan->vc.lock, flags);
  265. /* Allocate a new descriptor */
  266. if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
  267. /* If allocated the desc, it was added to tail of the list */
  268. spin_lock_irqsave(&chan->vc.lock, flags);
  269. desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc,
  270. node);
  271. list_move_tail(&desc->node, &chan->desc_got);
  272. spin_unlock_irqrestore(&chan->vc.lock, flags);
  273. return desc;
  274. }
  275. return NULL;
  276. }
  277. static void usb_dmac_desc_put(struct usb_dmac_chan *chan,
  278. struct usb_dmac_desc *desc)
  279. {
  280. unsigned long flags;
  281. spin_lock_irqsave(&chan->vc.lock, flags);
  282. list_move_tail(&desc->node, &chan->desc_freed);
  283. spin_unlock_irqrestore(&chan->vc.lock, flags);
  284. }
  285. /* -----------------------------------------------------------------------------
  286. * Stop and reset
  287. */
  288. static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan)
  289. {
  290. struct dma_chan *chan = &uchan->vc.chan;
  291. struct usb_dmac *dmac = to_usb_dmac(chan->device);
  292. int i;
  293. /* Don't issue soft reset if any one of channels is busy */
  294. for (i = 0; i < dmac->n_channels; ++i) {
  295. if (usb_dmac_chan_is_busy(uchan))
  296. return;
  297. }
  298. usb_dmac_write(dmac, USB_DMAOR, 0);
  299. usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR);
  300. udelay(100);
  301. usb_dmac_write(dmac, USB_DMASWR, 0);
  302. usb_dmac_write(dmac, USB_DMAOR, 1);
  303. }
  304. static void usb_dmac_chan_halt(struct usb_dmac_chan *chan)
  305. {
  306. u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
  307. chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE);
  308. usb_dmac_chan_write(chan, USB_DMACHCR, chcr);
  309. usb_dmac_soft_reset(chan);
  310. }
  311. static void usb_dmac_stop(struct usb_dmac *dmac)
  312. {
  313. usb_dmac_write(dmac, USB_DMAOR, 0);
  314. }
  315. /* -----------------------------------------------------------------------------
  316. * DMA engine operations
  317. */
  318. static int usb_dmac_alloc_chan_resources(struct dma_chan *chan)
  319. {
  320. struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
  321. int ret;
  322. while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) {
  323. ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG,
  324. GFP_KERNEL);
  325. if (ret < 0) {
  326. usb_dmac_desc_free(uchan);
  327. return ret;
  328. }
  329. uchan->descs_allocated++;
  330. }
  331. return pm_runtime_get_sync(chan->device->dev);
  332. }
  333. static void usb_dmac_free_chan_resources(struct dma_chan *chan)
  334. {
  335. struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
  336. unsigned long flags;
  337. /* Protect against ISR */
  338. spin_lock_irqsave(&uchan->vc.lock, flags);
  339. usb_dmac_chan_halt(uchan);
  340. spin_unlock_irqrestore(&uchan->vc.lock, flags);
  341. usb_dmac_desc_free(uchan);
  342. vchan_free_chan_resources(&uchan->vc);
  343. pm_runtime_put(chan->device->dev);
  344. }
  345. static struct dma_async_tx_descriptor *
  346. usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
  347. unsigned int sg_len, enum dma_transfer_direction dir,
  348. unsigned long dma_flags, void *context)
  349. {
  350. struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
  351. struct usb_dmac_desc *desc;
  352. struct scatterlist *sg;
  353. int i;
  354. if (!sg_len) {
  355. dev_warn(chan->device->dev,
  356. "%s: bad parameter: len=%d\n", __func__, sg_len);
  357. return NULL;
  358. }
  359. desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT);
  360. if (!desc)
  361. return NULL;
  362. desc->direction = dir;
  363. desc->sg_len = sg_len;
  364. for_each_sg(sgl, sg, sg_len, i) {
  365. desc->sg[i].mem_addr = sg_dma_address(sg);
  366. desc->sg[i].size = sg_dma_len(sg);
  367. }
  368. return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
  369. }
  370. static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
  371. {
  372. struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
  373. struct usb_dmac_desc *desc, *_desc;
  374. unsigned long flags;
  375. LIST_HEAD(head);
  376. LIST_HEAD(list);
  377. spin_lock_irqsave(&uchan->vc.lock, flags);
  378. usb_dmac_chan_halt(uchan);
  379. vchan_get_all_descriptors(&uchan->vc, &head);
  380. if (uchan->desc)
  381. uchan->desc = NULL;
  382. list_splice_init(&uchan->desc_got, &list);
  383. list_for_each_entry_safe(desc, _desc, &list, node)
  384. list_move_tail(&desc->node, &uchan->desc_freed);
  385. spin_unlock_irqrestore(&uchan->vc.lock, flags);
  386. vchan_dma_desc_free_list(&uchan->vc, &head);
  387. return 0;
  388. }
  389. static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
  390. struct usb_dmac_desc *desc,
  391. unsigned int sg_index)
  392. {
  393. struct usb_dmac_sg *sg = desc->sg + sg_index;
  394. u32 mem_addr = sg->mem_addr & 0xffffffff;
  395. unsigned int residue = sg->size;
  396. /*
  397. * We cannot use USB_DMATCR to calculate residue because USB_DMATCR
  398. * has unsuited value to calculate.
  399. */
  400. if (desc->direction == DMA_DEV_TO_MEM)
  401. residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr;
  402. else
  403. residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr;
  404. return residue;
  405. }
  406. static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan,
  407. dma_cookie_t cookie)
  408. {
  409. struct usb_dmac_desc *desc;
  410. u32 residue = 0;
  411. list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
  412. if (desc->done_cookie == cookie) {
  413. residue = desc->residue;
  414. break;
  415. }
  416. }
  417. return residue;
  418. }
  419. static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan,
  420. dma_cookie_t cookie)
  421. {
  422. u32 residue = 0;
  423. struct virt_dma_desc *vd;
  424. struct usb_dmac_desc *desc = chan->desc;
  425. int i;
  426. if (!desc) {
  427. vd = vchan_find_desc(&chan->vc, cookie);
  428. if (!vd)
  429. return 0;
  430. desc = to_usb_dmac_desc(vd);
  431. }
  432. /* Compute the size of all usb_dmac_sg still to be transferred */
  433. for (i = desc->sg_index + 1; i < desc->sg_len; i++)
  434. residue += desc->sg[i].size;
  435. /* Add the residue for the current sg */
  436. residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index);
  437. return residue;
  438. }
  439. static enum dma_status usb_dmac_tx_status(struct dma_chan *chan,
  440. dma_cookie_t cookie,
  441. struct dma_tx_state *txstate)
  442. {
  443. struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
  444. enum dma_status status;
  445. unsigned int residue = 0;
  446. unsigned long flags;
  447. status = dma_cookie_status(chan, cookie, txstate);
  448. /* a client driver will get residue after DMA_COMPLETE */
  449. if (!txstate)
  450. return status;
  451. spin_lock_irqsave(&uchan->vc.lock, flags);
  452. if (status == DMA_COMPLETE)
  453. residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie);
  454. else
  455. residue = usb_dmac_chan_get_residue(uchan, cookie);
  456. spin_unlock_irqrestore(&uchan->vc.lock, flags);
  457. dma_set_residue(txstate, residue);
  458. return status;
  459. }
  460. static void usb_dmac_issue_pending(struct dma_chan *chan)
  461. {
  462. struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
  463. unsigned long flags;
  464. spin_lock_irqsave(&uchan->vc.lock, flags);
  465. if (vchan_issue_pending(&uchan->vc) && !uchan->desc)
  466. usb_dmac_chan_start_desc(uchan);
  467. spin_unlock_irqrestore(&uchan->vc.lock, flags);
  468. }
  469. static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd)
  470. {
  471. struct usb_dmac_desc *desc = to_usb_dmac_desc(vd);
  472. struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan);
  473. usb_dmac_desc_put(chan, desc);
  474. }
  475. /* -----------------------------------------------------------------------------
  476. * IRQ handling
  477. */
  478. static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
  479. {
  480. struct usb_dmac_desc *desc = chan->desc;
  481. BUG_ON(!desc);
  482. if (++desc->sg_index < desc->sg_len) {
  483. usb_dmac_chan_start_sg(chan, desc->sg_index);
  484. } else {
  485. desc->residue = usb_dmac_get_current_residue(chan, desc,
  486. desc->sg_index - 1);
  487. desc->done_cookie = desc->vd.tx.cookie;
  488. desc->vd.tx_result.result = DMA_TRANS_NOERROR;
  489. desc->vd.tx_result.residue = desc->residue;
  490. vchan_cookie_complete(&desc->vd);
  491. /* Restart the next transfer if this driver has a next desc */
  492. usb_dmac_chan_start_desc(chan);
  493. }
  494. }
/*
 * Per-channel interrupt handler.  Reads CHCR once, decides what happened,
 * acknowledges the relevant status bits in a single write, and only then
 * advances the transfer state machine.
 */
static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
{
	struct usb_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	u32 mask = 0;		/* status/enable bits to clear below */
	u32 chcr;
	bool xfer_end = false;

	spin_lock(&chan->vc.lock);

	chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
	if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
		mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
		/* Only treat it as a completed transfer if DE was still set. */
		if (chcr & USB_DMACHCR_DE)
			xfer_end = true;
		ret |= IRQ_HANDLED;
	}
	if (chcr & USB_DMACHCR_NULL) {
		/* An interruption of TE will happen after we set FTE */
		mask |= USB_DMACHCR_NULL;
		chcr |= USB_DMACHCR_FTE;
		ret |= IRQ_HANDLED;
	}
	if (mask)
		usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);

	if (xfer_end)
		usb_dmac_isr_transfer_end(chan);

	spin_unlock(&chan->vc.lock);

	return ret;
}
  523. /* -----------------------------------------------------------------------------
  524. * OF xlate and channel filter
  525. */
  526. static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
  527. {
  528. struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
  529. struct of_phandle_args *dma_spec = arg;
  530. /* USB-DMAC should be used with fixed usb controller's FIFO */
  531. if (uchan->index != dma_spec->args[0])
  532. return false;
  533. return true;
  534. }
  535. static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
  536. struct of_dma *ofdma)
  537. {
  538. struct dma_chan *chan;
  539. dma_cap_mask_t mask;
  540. if (dma_spec->args_count != 1)
  541. return NULL;
  542. /* Only slave DMA channels can be allocated via DT */
  543. dma_cap_zero(mask);
  544. dma_cap_set(DMA_SLAVE, mask);
  545. chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec,
  546. ofdma->of_node);
  547. if (!chan)
  548. return NULL;
  549. return chan;
  550. }
  551. /* -----------------------------------------------------------------------------
  552. * Power management
  553. */
  554. #ifdef CONFIG_PM
  555. static int usb_dmac_runtime_suspend(struct device *dev)
  556. {
  557. struct usb_dmac *dmac = dev_get_drvdata(dev);
  558. int i;
  559. for (i = 0; i < dmac->n_channels; ++i) {
  560. if (!dmac->channels[i].iomem)
  561. break;
  562. usb_dmac_chan_halt(&dmac->channels[i]);
  563. }
  564. return 0;
  565. }
  566. static int usb_dmac_runtime_resume(struct device *dev)
  567. {
  568. struct usb_dmac *dmac = dev_get_drvdata(dev);
  569. return usb_dmac_init(dmac);
  570. }
  571. #endif /* CONFIG_PM */
/* System sleep is routed through runtime PM (force suspend/resume). */
static const struct dev_pm_ops usb_dmac_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
			   NULL)
};
  578. /* -----------------------------------------------------------------------------
  579. * Probe and remove
  580. */
  581. static int usb_dmac_chan_probe(struct usb_dmac *dmac,
  582. struct usb_dmac_chan *uchan,
  583. unsigned int index)
  584. {
  585. struct platform_device *pdev = to_platform_device(dmac->dev);
  586. char pdev_irqname[5];
  587. char *irqname;
  588. int ret;
  589. uchan->index = index;
  590. uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
  591. /* Request the channel interrupt. */
  592. sprintf(pdev_irqname, "ch%u", index);
  593. uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
  594. if (uchan->irq < 0)
  595. return -ENODEV;
  596. irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
  597. dev_name(dmac->dev), index);
  598. if (!irqname)
  599. return -ENOMEM;
  600. ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel,
  601. IRQF_SHARED, irqname, uchan);
  602. if (ret) {
  603. dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
  604. uchan->irq, ret);
  605. return ret;
  606. }
  607. uchan->vc.desc_free = usb_dmac_virt_desc_free;
  608. vchan_init(&uchan->vc, &dmac->engine);
  609. INIT_LIST_HEAD(&uchan->desc_freed);
  610. INIT_LIST_HEAD(&uchan->desc_got);
  611. return 0;
  612. }
  613. static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac)
  614. {
  615. struct device_node *np = dev->of_node;
  616. int ret;
  617. ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
  618. if (ret < 0) {
  619. dev_err(dev, "unable to read dma-channels property\n");
  620. return ret;
  621. }
  622. if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
  623. dev_err(dev, "invalid number of channels %u\n",
  624. dmac->n_channels);
  625. return -EINVAL;
  626. }
  627. return 0;
  628. }
/*
 * Probe: allocate driver state, map registers, bring the controller up via
 * runtime PM, initialize every channel, and register with both the OF DMA
 * framework and the DMA engine core.
 */
static int usb_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
	struct dma_device *engine;
	struct usb_dmac *dmac;
	struct resource *mem;
	unsigned int i;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = usb_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		/* get_sync raises the usage count even on failure: put it. */
		goto error_pm;
	}

	ret = usb_dmac_init(dmac);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);
	for (i = 0; i < dmac->n_channels; ++i) {
		ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto error;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/*
	 * Register the DMA engine device.
	 *
	 * Default transfer size of 32 bytes requires 32-byte alignment.
	 */
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	engine->dev = &pdev->dev;
	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = usb_dmac_free_chan_resources;
	engine->device_prep_slave_sg = usb_dmac_prep_slave_sg;
	engine->device_terminate_all = usb_dmac_chan_terminate_all;
	engine->device_tx_status = usb_dmac_tx_status;
	engine->device_issue_pending = usb_dmac_issue_pending;
	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	pm_runtime_put(&pdev->dev);
	return 0;

error:
	/*
	 * NOTE(review): reached even when the OF controller was never
	 * registered (init/chan_probe failures); presumably the free is
	 * harmless in that case — confirm against of_dma semantics.
	 */
	of_dma_controller_free(pdev->dev.of_node);
error_pm:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
  708. static void usb_dmac_chan_remove(struct usb_dmac *dmac,
  709. struct usb_dmac_chan *uchan)
  710. {
  711. usb_dmac_chan_halt(uchan);
  712. devm_free_irq(dmac->dev, uchan->irq, uchan);
  713. }
  714. static int usb_dmac_remove(struct platform_device *pdev)
  715. {
  716. struct usb_dmac *dmac = platform_get_drvdata(pdev);
  717. int i;
  718. for (i = 0; i < dmac->n_channels; ++i)
  719. usb_dmac_chan_remove(dmac, &dmac->channels[i]);
  720. of_dma_controller_free(pdev->dev.of_node);
  721. dma_async_device_unregister(&dmac->engine);
  722. pm_runtime_disable(&pdev->dev);
  723. return 0;
  724. }
  725. static void usb_dmac_shutdown(struct platform_device *pdev)
  726. {
  727. struct usb_dmac *dmac = platform_get_drvdata(pdev);
  728. usb_dmac_stop(dmac);
  729. }
/* Device-tree match table. */
static const struct of_device_id usb_dmac_of_ids[] = {
	{ .compatible = "renesas,usb-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);

static struct platform_driver usb_dmac_driver = {
	.driver = {
		.pm = &usb_dmac_pm,
		.name = "usb-dmac",
		.of_match_table = usb_dmac_of_ids,
	},
	.probe = usb_dmac_probe,
	.remove = usb_dmac_remove,
	.shutdown = usb_dmac_shutdown,
};

module_platform_driver(usb_dmac_driver);

MODULE_DESCRIPTION("Renesas USB DMA Controller Driver");
MODULE_AUTHOR("Yoshihiro Shimoda <[email protected]>");
MODULE_LICENSE("GPL v2");