idma32.c 7.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2013,2018,2020-2021 Intel Corporation
  3. #include <linux/bitops.h>
  4. #include <linux/dmaengine.h>
  5. #include <linux/errno.h>
  6. #include <linux/io.h>
  7. #include <linux/pci.h>
  8. #include <linux/slab.h>
  9. #include <linux/types.h>
  10. #include "internal.h"
/*
 * Per-channel registers in the shared "misc" I/O window (offsets from
 * __dw_regs()); which channel they touch is selected beforehand via
 * DMA_REGACCESS_CHID_CFG (see idma32_initialize_chan_xbar()).
 */
#define DMA_CTL_CH(x)			(0x1000 + (x) * 4)	/* channel attributes */
#define DMA_SRC_ADDR_FILLIN(x)		(0x1100 + (x) * 4)
#define DMA_DST_ADDR_FILLIN(x)		(0x1200 + (x) * 4)
#define DMA_XBAR_SEL(x)			(0x1300 + (x) * 4)	/* request-line routing */
#define DMA_REGACCESS_CHID_CFG		(0x1400)		/* channel select for the above */

/* DMA_CTL_CH(x) bits */
#define CTL_CH_TRANSFER_MODE_MASK	GENMASK(1, 0)
#define CTL_CH_TRANSFER_MODE_S2S	0
#define CTL_CH_TRANSFER_MODE_S2D	1	/* programmed for DMA_DEV_TO_MEM */
#define CTL_CH_TRANSFER_MODE_D2S	2	/* programmed for DMA_MEM_TO_DEV */
#define CTL_CH_TRANSFER_MODE_D2D	3
#define CTL_CH_RD_RS_MASK		GENMASK(4, 3)
#define CTL_CH_WR_RS_MASK		GENMASK(6, 5)
#define CTL_CH_RD_NON_SNOOP_BIT		BIT(8)
#define CTL_CH_WR_NON_SNOOP_BIT		BIT(9)

/* DMA_XBAR_SEL(x) bits */
#define XBAR_SEL_DEVID_MASK		GENMASK(15, 0)	/* PCI devfn of the slave */
#define XBAR_SEL_RX_TX_BIT		BIT(16)		/* set = TX (mem-to-dev) */
#define XBAR_SEL_RX_TX_SHIFT		16

/* DMA_REGACCESS_CHID_CFG bits */
#define REGACCESS_CHID_MASK		GENMASK(2, 0)
  29. static unsigned int idma32_get_slave_devfn(struct dw_dma_chan *dwc)
  30. {
  31. struct device *slave = dwc->chan.slave;
  32. if (!slave || !dev_is_pci(slave))
  33. return 0;
  34. return to_pci_dev(slave)->devfn;
  35. }
/*
 * Initialize a channel on controllers with a crossbar in front of the DMA
 * request lines (DW_DMA_QUIRK_XBAR_PRESENT): select the channel in the
 * shared register window, set transfer mode and snoop attributes, route
 * the request line through the crossbar, then program CFG_LO/CFG_HI.
 *
 * Only DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are handled; any other direction
 * returns early, leaving the channel unprogrammed.
 */
static void idma32_initialize_chan_xbar(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	void __iomem *misc = __dw_regs(dw);
	u32 cfghi = 0, cfglo = 0;
	u8 dst_id, src_id;
	u32 value;

	/* DMA Channel ID Configuration register must be programmed first */
	value = readl(misc + DMA_REGACCESS_CHID_CFG);

	value &= ~REGACCESS_CHID_MASK;
	value |= dwc->chan.chan_id;

	writel(value, misc + DMA_REGACCESS_CHID_CFG);

	/* Configure channel attributes */
	value = readl(misc + DMA_CTL_CH(dwc->chan.chan_id));

	/* Clear mode, snoop and RS fields before re-encoding them below */
	value &= ~(CTL_CH_RD_NON_SNOOP_BIT | CTL_CH_WR_NON_SNOOP_BIT);
	value &= ~(CTL_CH_RD_RS_MASK | CTL_CH_WR_RS_MASK);
	value &= ~CTL_CH_TRANSFER_MODE_MASK;

	switch (dwc->direction) {
	case DMA_MEM_TO_DEV:
		value |= CTL_CH_TRANSFER_MODE_D2S;
		value |= CTL_CH_WR_NON_SNOOP_BIT;
		break;
	case DMA_DEV_TO_MEM:
		value |= CTL_CH_TRANSFER_MODE_S2D;
		value |= CTL_CH_RD_NON_SNOOP_BIT;
		break;
	default:
		/*
		 * Memory-to-Memory and Device-to-Device are ignored for now.
		 *
		 * For Memory-to-Memory transfers we would need to set mode
		 * and disable snooping on both sides.
		 */
		return;
	}

	writel(value, misc + DMA_CTL_CH(dwc->chan.chan_id));

	/* Configure crossbar selection */
	value = readl(misc + DMA_XBAR_SEL(dwc->chan.chan_id));

	/* DEVFN selection (0 when the slave is absent or not a PCI device) */
	value &= ~XBAR_SEL_DEVID_MASK;
	value |= idma32_get_slave_devfn(dwc);

	switch (dwc->direction) {
	case DMA_MEM_TO_DEV:
		value |= XBAR_SEL_RX_TX_BIT;
		break;
	case DMA_DEV_TO_MEM:
		value &= ~XBAR_SEL_RX_TX_BIT;
		break;
	default:
		/* Memory-to-Memory and Device-to-Device are ignored for now */
		return;
	}

	writel(value, misc + DMA_XBAR_SEL(dwc->chan.chan_id));

	/*
	 * Configure DMA channel low and high registers; the memory side of
	 * the transfer uses the channel ID as its request line.
	 */
	switch (dwc->direction) {
	case DMA_MEM_TO_DEV:
		dst_id = dwc->chan.chan_id;
		src_id = dwc->dws.src_id;
		break;
	case DMA_DEV_TO_MEM:
		dst_id = dwc->dws.dst_id;
		src_id = dwc->chan.chan_id;
		break;
	default:
		/* Memory-to-Memory and Device-to-Device are ignored for now */
		return;
	}

	/* Set default burst alignment */
	cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;

	/* Low 4 bits of the request lines */
	cfghi |= IDMA32C_CFGH_DST_PER(dst_id & 0xf);
	cfghi |= IDMA32C_CFGH_SRC_PER(src_id & 0xf);

	/* Request line extension (2 bits) */
	cfghi |= IDMA32C_CFGH_DST_PER_EXT(dst_id >> 4 & 0x3);
	cfghi |= IDMA32C_CFGH_SRC_PER_EXT(src_id >> 4 & 0x3);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);
}
  114. static void idma32_initialize_chan_generic(struct dw_dma_chan *dwc)
  115. {
  116. u32 cfghi = 0;
  117. u32 cfglo = 0;
  118. /* Set default burst alignment */
  119. cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
  120. /* Low 4 bits of the request lines */
  121. cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
  122. cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
  123. /* Request line extension (2 bits) */
  124. cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
  125. cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
  126. channel_writel(dwc, CFG_LO, cfglo);
  127. channel_writel(dwc, CFG_HI, cfghi);
  128. }
  129. static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain)
  130. {
  131. u32 cfglo = channel_readl(dwc, CFG_LO);
  132. if (drain)
  133. cfglo |= IDMA32C_CFGL_CH_DRAIN;
  134. channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
  135. }
  136. static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain)
  137. {
  138. u32 cfglo = channel_readl(dwc, CFG_LO);
  139. if (drain)
  140. cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
  141. channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
  142. }
  143. static u32 idma32_bytes2block(struct dw_dma_chan *dwc,
  144. size_t bytes, unsigned int width, size_t *len)
  145. {
  146. u32 block;
  147. if (bytes > dwc->block_size) {
  148. block = dwc->block_size;
  149. *len = dwc->block_size;
  150. } else {
  151. block = bytes;
  152. *len = bytes;
  153. }
  154. return block;
  155. }
/*
 * Convert a hardware block value back to bytes. @width is unused: the
 * block counter holds a byte count here (mirrors idma32_bytes2block()).
 */
static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
{
	return IDMA32C_CTLH_BLOCK_TS(block);
}
  160. static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
  161. {
  162. struct dma_slave_config *sconfig = &dwc->dma_sconfig;
  163. u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
  164. u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
  165. return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
  166. DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
  167. }
  168. static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
  169. {
  170. *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0;
  171. }
/* Compose the device name exposed by dmaengine, e.g. "idma32:dmac0". */
static void idma32_set_device_name(struct dw_dma *dw, int id)
{
	snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id);
}
  176. /*
  177. * Program FIFO size of channels.
  178. *
  179. * By default full FIFO (512 bytes) is assigned to channel 0. Here we
  180. * slice FIFO on equal parts between channels.
  181. */
  182. static void idma32_fifo_partition(struct dw_dma *dw)
  183. {
  184. u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
  185. IDMA32C_FP_UPDATE;
  186. u64 fifo_partition = 0;
  187. /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
  188. fifo_partition |= value << 0;
  189. /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
  190. fifo_partition |= value << 32;
  191. /* Program FIFO Partition registers - 64 bytes per channel */
  192. idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
  193. idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
  194. }
/* Switch the controller off, then re-apply the FIFO partitioning. */
static void idma32_disable(struct dw_dma *dw)
{
	do_dw_dma_off(dw);
	idma32_fifo_partition(dw);
}
/* Program the FIFO partitioning before switching the controller on. */
static void idma32_enable(struct dw_dma *dw)
{
	idma32_fifo_partition(dw);
	do_dw_dma_on(dw);
}
  205. int idma32_dma_probe(struct dw_dma_chip *chip)
  206. {
  207. struct dw_dma *dw;
  208. dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
  209. if (!dw)
  210. return -ENOMEM;
  211. /* Channel operations */
  212. if (chip->pdata->quirks & DW_DMA_QUIRK_XBAR_PRESENT)
  213. dw->initialize_chan = idma32_initialize_chan_xbar;
  214. else
  215. dw->initialize_chan = idma32_initialize_chan_generic;
  216. dw->suspend_chan = idma32_suspend_chan;
  217. dw->resume_chan = idma32_resume_chan;
  218. dw->prepare_ctllo = idma32_prepare_ctllo;
  219. dw->encode_maxburst = idma32_encode_maxburst;
  220. dw->bytes2block = idma32_bytes2block;
  221. dw->block2bytes = idma32_block2bytes;
  222. /* Device operations */
  223. dw->set_device_name = idma32_set_device_name;
  224. dw->disable = idma32_disable;
  225. dw->enable = idma32_enable;
  226. chip->dw = dw;
  227. return do_dma_probe(chip);
  228. }
  229. EXPORT_SYMBOL_GPL(idma32_dma_probe);
/* Tear down the controller via the common removal path. */
int idma32_dma_remove(struct dw_dma_chip *chip)
{
	return do_dma_remove(chip);
}
EXPORT_SYMBOL_GPL(idma32_dma_remove);