
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/drivers/dma/dma-sh.c
 *
 * SuperH On-chip DMAC Support
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2005 Andriy Skulysh
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/io.h>
#include <mach-dreamcast/mach/dma.h>
#include <asm/dma.h>
#include <asm/dma-register.h>
#include <cpu/dma-register.h>
#include <cpu/dma.h>
/*
 * Some of the SoCs feature two DMAC modules. In such a case, the channels
 * are distributed equally among them.
 */
#ifdef SH_DMAC_BASE1
#define SH_DMAC_NR_MD_CH        (CONFIG_NR_ONCHIP_DMA_CHANNELS / 2)
#else
#define SH_DMAC_NR_MD_CH        CONFIG_NR_ONCHIP_DMA_CHANNELS
#endif

#define SH_DMAC_CH_SZ           0x10

/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 */
#define RS_DUAL (DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT))
static unsigned long dma_find_base(unsigned int chan)
{
        unsigned long base = SH_DMAC_BASE0;

#ifdef SH_DMAC_BASE1
        if (chan >= SH_DMAC_NR_MD_CH)
                base = SH_DMAC_BASE1;
#endif

        return base;
}
static unsigned long dma_base_addr(unsigned int chan)
{
        unsigned long base = dma_find_base(chan);

        chan = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ;

        /* DMAOR is placed inside the channel register space. Step over it. */
        if (chan >= DMAOR)
                base += SH_DMAC_CH_SZ;

        return base + chan;
}
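
/*
 * Transfer-end (DMTE) IRQ lookup. With CONFIG_SH_DMA_IRQ_MULTI the
 * channels of each DMAC share a single multiplexed vector (channels 0-5
 * use the DMTE0 vector, channels 6 and up the DMTE6 vector); otherwise
 * each channel has its own vector, collected in dmte_irq_map[].
 */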
#ifdef CONFIG_SH_DMA_IRQ_MULTI
static inline unsigned int get_dmte_irq(unsigned int chan)
{
        return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
}
#else

static unsigned int dmte_irq_map[] = {
        DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,

#ifdef DMTE4_IRQ
        DMTE4_IRQ, DMTE4_IRQ + 1,
#endif

#ifdef DMTE6_IRQ
        DMTE6_IRQ, DMTE6_IRQ + 1,
#endif

#ifdef DMTE8_IRQ
        DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
#endif
};

static inline unsigned int get_dmte_irq(unsigned int chan)
{
        return dmte_irq_map[chan];
}
#endif

/*
 * We determine the correct shift size based on the CHCR transmit size
 * for the given channel. With a shift of ts_shift[transmit_size], the
 * transfer completes in:
 *
 *      info->count >> ts_shift[transmit_size]
 *
 * iterations.
 */
static unsigned int ts_shift[] = TS_SHIFT;
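
/*
 * The CHCR TS field is split into non-contiguous low and high bit
 * groups; recombine them into a single index into ts_shift[].
 */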
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
        u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
        int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
                  ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

        return ts_shift[cnt];
}

/*
 * The transfer end interrupt must read the chcr register to end the
 * hardware interrupt active condition. Besides that, it needs to wake up
 * any waiting process, which should handle setting up the next transfer.
 */
static irqreturn_t dma_tei(int irq, void *dev_id)
{
        struct dma_channel *chan = dev_id;
        u32 chcr;

        chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);

        if (!(chcr & CHCR_TE))
                return IRQ_NONE;

        chcr &= ~(CHCR_IE | CHCR_DE);
        __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

        wake_up(&chan->wait_queue);

        return IRQ_HANDLED;
}
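
/*
 * Only TEI-capable channels raise a transfer-end interrupt; everything
 * else gets by without an IRQ handler.
 */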
static int sh_dmac_request_dma(struct dma_channel *chan)
{
        if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
                return 0;

        return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
                           chan->dev_id, chan);
}

static void sh_dmac_free_dma(struct dma_channel *chan)
{
        free_irq(get_dmte_irq(chan->chan), chan);
}
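
/*
 * CHCR_IE in the requested value only flags the channel as TEI-capable;
 * the bit itself stays clear until sh_dmac_enable_dma() actually kicks
 * off the transfer.
 */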
static int
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
        if (!chcr)
                chcr = RS_DUAL | CHCR_IE;

        if (chcr & CHCR_IE) {
                chcr &= ~CHCR_IE;
                chan->flags |= DMA_TEI_CAPABLE;
        } else {
                chan->flags &= ~DMA_TEI_CAPABLE;
        }

        __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

        chan->flags |= DMA_CONFIGURED;
        return 0;
}
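
/*
 * Start the transfer: set CHCR_DE (plus CHCR_IE for TEI-capable
 * channels), then unmask the channel's transfer-end IRQ.
 */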
static void sh_dmac_enable_dma(struct dma_channel *chan)
{
        int irq;
        u32 chcr;

        chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
        chcr |= CHCR_DE;

        if (chan->flags & DMA_TEI_CAPABLE)
                chcr |= CHCR_IE;

        __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

        if (chan->flags & DMA_TEI_CAPABLE) {
                irq = get_dmte_irq(chan->chan);
                enable_irq(irq);
        }
}
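
/*
 * Stop the channel: mask the transfer-end IRQ first, then clear the
 * enable, transfer-end and interrupt-enable bits in CHCR.
 */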
static void sh_dmac_disable_dma(struct dma_channel *chan)
{
        int irq;
        u32 chcr;

        if (chan->flags & DMA_TEI_CAPABLE) {
                irq = get_dmte_irq(chan->chan);
                disable_irq(irq);
        }

        chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
        chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
        __raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
}
static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
        /*
         * If we haven't pre-configured the channel with special flags, use
         * the defaults.
         */
        if (unlikely(!(chan->flags & DMA_CONFIGURED)))
                sh_dmac_configure_channel(chan, 0);

        sh_dmac_disable_dma(chan);

        /*
         * Single-address mode usage note!
         *
         * It's important that we don't accidentally write any value to SAR/DAR
         * (this includes 0) that hasn't been directly specified by the user if
         * we're in single-address mode.
         *
         * In this case, only one address can be defined, anything else will
         * result in a DMA address error interrupt (at least on the SH-4),
         * which will subsequently halt the transfer.
         *
         * Channel 2 on the Dreamcast is a special case, as this is used for
         * cascading to the PVR2 DMAC. In this case, we still need to write
         * SAR and DAR, regardless of value, in order for cascading to work.
         */
        if (chan->sar || (mach_is_dreamcast() &&
                          chan->chan == PVR2_CASCADE_CHAN))
                __raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
        if (chan->dar || (mach_is_dreamcast() &&
                          chan->chan == PVR2_CASCADE_CHAN))
                __raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));

        __raw_writel(chan->count >> calc_xmit_shift(chan),
                     (dma_base_addr(chan->chan) + TCR));

        sh_dmac_enable_dma(chan);

        return 0;
}
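
/*
 * TCR counts down the remaining transfer units; shift by the transmit
 * size to convert back to bytes. An idle channel (CHCR_DE clear) has
 * nothing outstanding.
 */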
static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
        if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
                return 0;

        return __raw_readl(dma_base_addr(chan->chan) + TCR)
                 << calc_xmit_shift(chan);
}

/*
 * DMAOR handling
 */
#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
    defined(CONFIG_CPU_SUBTYPE_SH7724) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
    defined(CONFIG_CPU_SUBTYPE_SH7785)
#define NR_DMAOR        2
#else
#define NR_DMAOR        1
#endif
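
/*
 * (n) * SH_DMAC_NR_MD_CH is the first channel of DMAC n; feeding it to
 * dma_find_base() makes the accessors below resolve to that module's
 * register block.
 */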
#define dmaor_read_reg(n)               __raw_readw(dma_find_base((n) * \
                                                SH_DMAC_NR_MD_CH) + DMAOR)
#define dmaor_write_reg(n, data)        __raw_writew(data, \
                                                dma_find_base((n) * \
                                                SH_DMAC_NR_MD_CH) + DMAOR)

static inline int dmaor_reset(int no)
{
        unsigned long dmaor = dmaor_read_reg(no);

        /* Try to clear the error flags first, in case they are set */
        dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
        dmaor_write_reg(no, dmaor);

        dmaor |= DMAOR_INIT;
        dmaor_write_reg(no, dmaor);

        /* See if we got an error again */
        if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
                printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
                return -EINVAL;
        }

        return 0;
}

/*
 * DMAE handling
 */
#ifdef CONFIG_CPU_SH4
#if defined(DMAE1_IRQ)
#define NR_DMAE         2
#else
#define NR_DMAE         1
#endif

static const char *dmae_name[] = {
        "DMAC Address Error0",
        "DMAC Address Error1"
};
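
/*
 * With multiplexed IRQs, look up the address error vector via the
 * module's first channel (n * 6); otherwise each DMAC has a dedicated
 * DMAE vector in dmae_irq_map[].
 */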
#ifdef CONFIG_SH_DMA_IRQ_MULTI
static inline unsigned int get_dma_error_irq(int n)
{
        return get_dmte_irq(n * 6);
}
#else

static unsigned int dmae_irq_map[] = {
        DMAE0_IRQ,

#ifdef DMAE1_IRQ
        DMAE1_IRQ,
#endif
};

static inline unsigned int get_dma_error_irq(int n)
{
        return dmae_irq_map[n];
}
#endif
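
/*
 * On an address error, clear the NMIF/AE flags by resetting every
 * DMAOR, then mask the error IRQ to keep it from firing again.
 */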
static irqreturn_t dma_err(int irq, void *dummy)
{
        int i;

        for (i = 0; i < NR_DMAOR; i++)
                dmaor_reset(i);

        disable_irq(irq);

        return IRQ_HANDLED;
}

static int dmae_irq_init(void)
{
        int n;

        for (n = 0; n < NR_DMAE; n++) {
                int i = request_irq(get_dma_error_irq(n), dma_err,
                                    IRQF_SHARED, dmae_name[n],
                                    (void *)dmae_name[n]);
                if (unlikely(i < 0)) {
                        printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
                        return i;
                }
        }

        return 0;
}

static void dmae_irq_free(void)
{
        int n;

        /*
         * Free with the same dev_id cookie the IRQ was requested with; a
         * shared IRQ cannot be matched with a NULL dev_id.
         */
        for (n = 0; n < NR_DMAE; n++)
                free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
}
#else
static inline int dmae_irq_init(void)
{
        return 0;
}

static void dmae_irq_free(void)
{
}
#endif

static struct dma_ops sh_dmac_ops = {
        .request        = sh_dmac_request_dma,
        .free           = sh_dmac_free_dma,
        .get_residue    = sh_dmac_get_dma_residue,
        .xfer           = sh_dmac_xfer_dma,
        .configure      = sh_dmac_configure_channel,
};

static struct dma_info sh_dmac_info = {
        .name           = "sh_dmac",
        .nr_channels    = CONFIG_NR_ONCHIP_DMA_CHANNELS,
        .ops            = &sh_dmac_ops,
        .flags          = DMAC_CHANNELS_TEI_CAPABLE,
};

static int __init sh_dmac_init(void)
{
        struct dma_info *info = &sh_dmac_info;
        int i, rc;

        /*
         * Initialize DMAE, for parts that support it.
         */
        rc = dmae_irq_init();
        if (unlikely(rc != 0))
                return rc;

        /*
         * Initialize DMAOR, and clean up any error flags that may have
         * been set.
         */
        for (i = 0; i < NR_DMAOR; i++) {
                rc = dmaor_reset(i);
                if (unlikely(rc != 0))
                        return rc;
        }

        return register_dmac(info);
}

static void __exit sh_dmac_exit(void)
{
        dmae_irq_free();
        unregister_dmac(&sh_dmac_info);
}

subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL v2");