sf-pdma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */

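/*
 * This engine only advertises DMA_MEMCPY. A minimal client sketch, assuming
 * dst/src are already DMA-mapped addresses and with error handling omitted:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */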
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "sf-pdma.h"

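/*
 * Fallback 64-bit MMIO accessors for configurations that do not provide
 * native readq()/writeq(): each access is split into two 32-bit accesses
 * (low word first), so it is not atomic from the device's point of view.
 */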
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(lower_32_bits(v), addr);
	writel(upper_32_bits(v), addr + 4);
}
#endif

static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}

static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sf_pdma_desc, vdesc);
}

static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->chan = chan;

	return desc;
}

static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
			      u64 dst, u64 src, u64 size)
{
	desc->xfer_type = PDMA_FULL_SPEED;
	desc->xfer_size = size;
	desc->dst_addr = dst;
	desc->src_addr = src;
}

static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(PDMA_CLEAR_CTRL, regs->ctrl);
}

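/*
 * Prepare a single memory-to-memory descriptor. Only the software descriptor
 * is filled in here; the channel registers are not touched until the client
 * calls dma_async_issue_pending().
 */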
static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct sf_pdma_desc *desc;
	unsigned long iflags;

	if (chan && (!len || !dest || !src)) {
		dev_err(chan->pdma->dma_dev.dev,
			"Please check dma len, dest, src!\n");
		return NULL;
	}

	desc = sf_pdma_alloc_desc(chan);
	if (!desc)
		return NULL;

	desc->dirn = DMA_MEM_TO_MEM;
	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

	spin_lock_irqsave(&chan->vchan.lock, iflags);
	sf_pdma_fill_desc(desc, dest, src, len);
	spin_unlock_irqrestore(&chan->vchan.lock, iflags);

	return desc->async_tx;
}

static int sf_pdma_slave_config(struct dma_chan *dchan,
				struct dma_slave_config *cfg)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);

	memcpy(&chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

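/*
 * Claim the channel for exclusive use by writing PDMA_CLAIM_MASK to its
 * control register; the claim is dropped again in
 * sf_pdma_free_chan_resources() via sf_pdma_disclaim_chan().
 */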
static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct pdma_regs *regs = &chan->regs;

	dma_cookie_init(dchan);
	writel(PDMA_CLAIM_MASK, regs->ctrl);

	return 0;
}

static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}

static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	sf_pdma_disclaim_chan(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);
}

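/*
 * Residue lookup for sf_pdma_tx_status(): read the hardware's remaining
 * byte count (PDMA_REMAINING_BYTE register) for an in-flight cookie, or
 * fall back to the descriptor's full transfer size.
 */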
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
				   dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;
	u64 residue = 0;
	struct sf_pdma_desc *desc;
	struct dma_async_tx_descriptor *tx = NULL;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
		if (vd->tx.cookie == cookie)
			tx = &vd->tx;

	if (!tx)
		goto out;

	if (cookie == tx->chan->completed_cookie)
		goto out;

	if (cookie == tx->cookie) {
		residue = readq(regs->residue);
	} else {
		vd = vchan_find_desc(&chan->vchan, cookie);
		if (!vd)
			goto out;

		desc = to_sf_pdma_desc(vd);
		residue = desc->xfer_size;
	}

out:
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	return residue;
}

static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan,
		  dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	enum dma_status status;

	status = dma_cookie_status(dchan, cookie, txstate);

	if (txstate && status != DMA_ERROR)
		dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

	return status;
}

static int sf_pdma_terminate_all(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	chan->xfer_err = false;
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

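/*
 * Start (or restart) the transfer currently programmed into the channel:
 * keep the channel claimed, enable the done/error interrupts and set the
 * run bit in the control register.
 */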
static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;
	u32 v;

	v = PDMA_CLAIM_MASK |
		PDMA_ENABLE_DONE_INT_MASK |
		PDMA_ENABLE_ERR_INT_MASK |
		PDMA_RUN_MASK;

	writel(v, regs->ctrl);
}

static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
{
	struct virt_dma_chan *vchan = &chan->vchan;
	struct virt_dma_desc *vdesc;

	if (list_empty(&vchan->desc_issued))
		return NULL;

	vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);

	return container_of(vdesc, struct sf_pdma_desc, vdesc);
}

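/*
 * Program the channel registers from chan->desc and start the transfer.
 * All callers hold chan->vchan.lock when calling this.
 */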
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc = chan->desc;
	struct pdma_regs *regs = &chan->regs;

	if (!desc) {
		dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
		return;
	}

	writel(desc->xfer_type, regs->xfer_type);
	writeq(desc->xfer_size, regs->xfer_size);
	writeq(desc->dst_addr, regs->dst_addr);
	writeq(desc->src_addr, regs->src_addr);

	chan->desc = desc;
	chan->status = DMA_IN_PROGRESS;
	sf_pdma_enable_request(chan);
}

static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
		/* vchan_issue_pending() has verified the issued list is not empty, so chan->desc is not NULL */
		chan->desc = sf_pdma_get_first_pending_desc(chan);
		sf_pdma_xfer_desc(chan);
	}

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

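/*
 * Done bottom half: reset the retry budget after a recovered error, complete
 * the finished descriptor, then start the next issued descriptor, if any.
 */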
static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct sf_pdma_desc *desc;

	desc = to_sf_pdma_desc(vdesc);
	kfree(desc);
}

static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->xfer_err) {
		chan->retries = MAX_RETRY;
		chan->status = DMA_COMPLETE;
		chan->xfer_err = false;
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	list_del(&chan->desc->vdesc.node);
	vchan_cookie_complete(&chan->desc->vdesc);

	chan->desc = sf_pdma_get_first_pending_desc(chan);
	if (chan->desc)
		sf_pdma_xfer_desc(chan);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

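/*
 * Error bottom half: retry the failed transfer until chan->retries is
 * exhausted, then report back to the client via its completion callback.
 */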
static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
	struct sf_pdma_desc *desc = chan->desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->retries <= 0) {
		/* failed to recover */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
	} else {
		/* retry */
		chan->retries--;
		chan->xfer_err = true;
		chan->status = DMA_ERROR;

		sf_pdma_enable_request(chan);
		spin_unlock_irqrestore(&chan->lock, flags);
	}
}

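/*
 * "Done" interrupt handler: acknowledge the interrupt and either hand the
 * completed descriptor off to the done tasklet or, if the hardware reports
 * a non-zero residue, restart the transfer for the remaining bytes.
 */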
static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;
	u64 residue;

	spin_lock(&chan->vchan.lock);
	writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
	residue = readq(regs->residue);

	if (!residue) {
		tasklet_hi_schedule(&chan->done_tasklet);
	} else {
		/* submit the next transaction if possible */
		struct sf_pdma_desc *desc = chan->desc;

		desc->src_addr += desc->xfer_size - residue;
		desc->dst_addr += desc->xfer_size - residue;
		desc->xfer_size = residue;

		sf_pdma_xfer_desc(chan);
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;

	spin_lock(&chan->lock);
	writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
	spin_unlock(&chan->lock);

	tasklet_schedule(&chan->err_tasklet);

	return IRQ_HANDLED;
}

/**
 * sf_pdma_irq_init() - Init PDMA IRQ Handlers
 * @pdev: pointer to the platform_device
 * @pdma: pointer to the PDMA engine; the caller must check for NULL
 *
 * Initialize the DONE and ERROR interrupt handlers for each channel. Callers
 * must make sure the pointers passed in are non-NULL. This function should be
 * called only once during device probe.
 *
 * Context: Any context.
 *
 * Return:
 * * 0		- OK to init all IRQ handlers
 * * -EINVAL	- Fail to request IRQ
 */
static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
{
	int irq, r, i;
	struct sf_pdma_chan *chan;

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		irq = platform_get_irq(pdev, i * 2);
		if (irq < 0)
			return -EINVAL;

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
			return -EINVAL;
		}

		chan->txirq = irq;

		irq = platform_get_irq(pdev, (i * 2) + 1);
		if (irq < 0)
			return -EINVAL;

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
			return -EINVAL;
		}

		chan->errirq = irq;
	}

	return 0;
}

/**
 * sf_pdma_setup_chans() - Init settings of each channel
 * @pdma: pointer to the PDMA engine; the caller must check for NULL
 *
 * Initialize all data structures and register bases. Callers must make sure
 * the pointers passed in are non-NULL. This function should be called only
 * once during device probe.
 *
 * Context: Any context.
 *
 * Return: none
 */
static void sf_pdma_setup_chans(struct sf_pdma *pdma)
{
	int i;
	struct sf_pdma_chan *chan;

	INIT_LIST_HEAD(&pdma->dma_dev.channels);

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		chan->regs.ctrl =
			SF_PDMA_REG_BASE(i) + PDMA_CTRL;
		chan->regs.xfer_type =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
		chan->regs.xfer_size =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
		chan->regs.dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
		chan->regs.src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
		chan->regs.act_type =
			SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
		chan->regs.residue =
			SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
		chan->regs.cur_dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
		chan->regs.cur_src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

		chan->pdma = pdma;
		chan->pm_state = RUNNING;
		chan->slave_id = i;
		chan->xfer_err = false;
		spin_lock_init(&chan->lock);

		chan->vchan.desc_free = sf_pdma_free_desc;
		vchan_init(&chan->vchan, &pdma->dma_dev);

		writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

		tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
		tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
	}
}

static int sf_pdma_probe(struct platform_device *pdev)
{
	struct sf_pdma *pdma;
	struct resource *res;
	int ret, n_chans;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
		DMA_SLAVE_BUSWIDTH_64_BYTES;

	ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans);
	if (ret) {
		/* backwards-compatibility for no dma-channels property */
		dev_dbg(&pdev->dev, "set number of channels to default value: 4\n");
		n_chans = PDMA_MAX_NR_CH;
	} else if (n_chans > PDMA_MAX_NR_CH) {
		dev_err(&pdev->dev, "the number of channels exceeds the maximum\n");
		return -EINVAL;
	}

	pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans),
			    GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->n_chans = n_chans;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdma->membase))
		return PTR_ERR(pdma->membase);

	ret = sf_pdma_irq_init(pdev, pdma);
	if (ret)
		return ret;

	sf_pdma_setup_chans(pdma);

	pdma->dma_dev.dev = &pdev->dev;

	/* Setup capability */
	dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
	pdma->dma_dev.copy_align = 2;
	pdma->dma_dev.src_addr_widths = widths;
	pdma->dma_dev.dst_addr_widths = widths;
	pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
	pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdma->dma_dev.descriptor_reuse = true;

	/* Setup DMA APIs */
	pdma->dma_dev.device_alloc_chan_resources =
		sf_pdma_alloc_chan_resources;
	pdma->dma_dev.device_free_chan_resources =
		sf_pdma_free_chan_resources;
	pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
	pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
	pdma->dma_dev.device_config = sf_pdma_slave_config;
	pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
	pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;

	platform_set_drvdata(pdev, pdma);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(&pdev->dev,
			 "Failed to set DMA mask. Fall back to default.\n");

	ret = dma_async_device_register(&pdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register SiFive Platform DMA. (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int sf_pdma_remove(struct platform_device *pdev)
{
	struct sf_pdma *pdma = platform_get_drvdata(pdev);
	struct sf_pdma_chan *ch;
	int i;

	for (i = 0; i < pdma->n_chans; i++) {
		ch = &pdma->chans[i];

		devm_free_irq(&pdev->dev, ch->txirq, ch);
		devm_free_irq(&pdev->dev, ch->errirq, ch);
		list_del(&ch->vchan.chan.device_node);
		tasklet_kill(&ch->vchan.task);
		tasklet_kill(&ch->done_tasklet);
		tasklet_kill(&ch->err_tasklet);
	}

	dma_async_device_unregister(&pdma->dma_dev);

	return 0;
}

static const struct of_device_id sf_pdma_dt_ids[] = {
	{ .compatible = "sifive,fu540-c000-pdma" },
	{ .compatible = "sifive,pdma0" },
	{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);

static struct platform_driver sf_pdma_driver = {
	.probe		= sf_pdma_probe,
	.remove		= sf_pdma_remove,
	.driver		= {
		.name	= "sf-pdma",
		.of_match_table = sf_pdma_dt_ids,
	},
};

static int __init sf_pdma_init(void)
{
	return platform_driver_register(&sf_pdma_driver);
}

static void __exit sf_pdma_exit(void)
{
	platform_driver_unregister(&sf_pdma_driver);
}

/* do early init */
subsys_initcall(sf_pdma_init);
module_exit(sf_pdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SiFive Platform DMA driver");
MODULE_AUTHOR("Green Wan <[email protected]>");