sun4i-dma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <[email protected]>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width) ((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len) ((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode) ((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type) ((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width) ((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len) ((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode) ((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type) (type)

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM 0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT (0x1F + 1)

/** Normal DMA register layout **/
/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR 0
#define SUN4I_NDMA_ADDR_MODE_IO 1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n) ((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE BIT(6)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR 0
#define SUN4I_DDMA_ADDR_MODE_IO 1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE 2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE 3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM 0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT (0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n) (((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n) (((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n) (((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n) (((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG 0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG 0x4

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n) (0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG 0x0
#define SUN4I_NDMA_SRC_ADDR_REG 0x4
#define SUN4I_NDMA_DST_ADDR_REG 0x8
#define SUN4I_NDMA_BYTE_COUNT_REG 0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n) (0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG 0x0
#define SUN4I_DDMA_SRC_ADDR_REG 0x4
#define SUN4I_DDMA_DST_ADDR_REG 0x8
#define SUN4I_DDMA_BYTE_COUNT_REG 0xC
#define SUN4I_DDMA_PARA_REG 0x18

/** DMA Driver **/
/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there are 29 and 21
 * respectively. Given that the Normal DMA endpoints (other than
 * SDRAM) can be used as tx/rx, we need 78 vchans in total
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS 8
#define SUN4I_DDMA_NR_MAX_CHANNELS 8
#define SUN4I_DMA_NR_MAX_CHANNELS \
        (SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS (29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS 21
#define SUN4I_DMA_NR_MAX_VCHANS \
        (SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
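
/*
 * In other words: 29 normal endpoints, each usable as both source and
 * destination except SDRAM, gives 29 * 2 - 1 = 57 normal vchans, and
 * adding the 21 dedicated endpoints yields the 78 vchans quoted above.
 */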

/* This set of SUN4I_DDMA timing parameters was found experimentally while
 * working with the SPI driver and seems to make it behave correctly */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
        (SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) | \
         SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) | \
         SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
         SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))

/*
 * Normal DMA supports individual transfers (segments) up to 128k.
 * Dedicated DMA supports transfers up to 16M. We can only report
 * one size limit, so we have to use the smaller value.
 */
#define SUN4I_NDMA_MAX_SEG_SIZE SZ_128K
#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE

struct sun4i_dma_pchan {
        /* Register base of channel */
        void __iomem *base;
        /* vchan currently being serviced */
        struct sun4i_dma_vchan *vchan;
        /* Is this a dedicated pchan? */
        int is_dedicated;
};

struct sun4i_dma_vchan {
        struct virt_dma_chan vc;
        struct dma_slave_config cfg;
        struct sun4i_dma_pchan *pchan;
        struct sun4i_dma_promise *processing;
        struct sun4i_dma_contract *contract;
        u8 endpoint;
        int is_dedicated;
};

struct sun4i_dma_promise {
        u32 cfg;                /* channel configuration register value */
        u32 para;               /* dedicated DMA timing parameter value */
        dma_addr_t src;
        dma_addr_t dst;
        size_t len;
        struct list_head list;  /* node in a contract's demands lists */
};

/* A contract is a set of promises */
struct sun4i_dma_contract {
        struct virt_dma_desc vd;
        struct list_head demands;           /* promises yet to be executed */
        struct list_head completed_demands; /* promises already fulfilled */
        bool is_cyclic : 1;
        bool use_half_int : 1;  /* see sun4i_dma_prep_dma_cyclic() */
};

struct sun4i_dma_dev {
        DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
        struct dma_device slave;
        struct sun4i_dma_pchan *pchans;
        struct sun4i_dma_vchan *vchans;
        void __iomem *base;
        struct clk *clk;
        int irq;
        spinlock_t lock;
};

static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
        return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
        return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
        return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static int convert_burst(u32 maxburst)
{
        if (maxburst > 8)
                return -EINVAL;

        /* 1 -> 0, 4 -> 1, 8 -> 2 */
        return (maxburst >> 2);
}

static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
        if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
                return -EINVAL;

        /* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
        return (addr_width >> 1);
}
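
/*
 * Example (illustrative, not part of the original file): maxburst = 8
 * converts to burst code 2, and DMA_SLAVE_BUSWIDTH_4_BYTES converts to
 * width code 2, which the promise generators below fold into the config
 * word as, e.g., SUN4I_DMA_CFG_SRC_BURST_LENGTH(2) |
 * SUN4I_DMA_CFG_SRC_DATA_WIDTH(2).
 */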

static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

        vchan_free_chan_resources(&vchan->vc);
}

static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
                                                  struct sun4i_dma_vchan *vchan)
{
        struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
        unsigned long flags;
        int i, max;

        /*
         * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
         * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
         */
        if (vchan->is_dedicated) {
                i = SUN4I_NDMA_NR_MAX_CHANNELS;
                max = SUN4I_DMA_NR_MAX_CHANNELS;
        } else {
                i = 0;
                max = SUN4I_NDMA_NR_MAX_CHANNELS;
        }

        spin_lock_irqsave(&priv->lock, flags);
        for_each_clear_bit_from(i, priv->pchans_used, max) {
                pchan = &pchans[i];
                pchan->vchan = vchan;
                set_bit(i, priv->pchans_used);
                break;
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        return pchan;
}

static void release_pchan(struct sun4i_dma_dev *priv,
                          struct sun4i_dma_pchan *pchan)
{
        unsigned long flags;
        int nr = pchan - priv->pchans;

        spin_lock_irqsave(&priv->lock, flags);

        pchan->vchan = NULL;
        clear_bit(nr, priv->pchans_used);

        spin_unlock_irqrestore(&priv->lock, flags);
}

static void configure_pchan(struct sun4i_dma_pchan *pchan,
                            struct sun4i_dma_promise *d)
{
        /*
         * Configure addresses and misc parameters depending on type.
         * SUN4I_DDMA has an extra field with timing parameters.
         */
        if (pchan->is_dedicated) {
                writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
                writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
                writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
                writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
                writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
        } else {
                writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
                writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
                writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
                writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
        }
}

static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
                                struct sun4i_dma_pchan *pchan,
                                int half, int end)
{
        u32 reg;
        int pchan_number = pchan - priv->pchans;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

        if (half)
                reg |= BIT(pchan_number * 2);
        else
                reg &= ~BIT(pchan_number * 2);

        if (end)
                reg |= BIT(pchan_number * 2 + 1);
        else
                reg &= ~BIT(pchan_number * 2 + 1);

        writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

        spin_unlock_irqrestore(&priv->lock, flags);
}
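
/*
 * Layout note: the IRQ enable and pending registers hold two bits per
 * pchan; the even bit signals "half done" and the odd bit "end of
 * transfer". The interrupt handler below relies on this same even/odd
 * convention.
 */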

/*
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
                                   struct sun4i_dma_vchan *vchan)
{
        struct sun4i_dma_promise *promise = NULL;
        struct sun4i_dma_contract *contract = NULL;
        struct sun4i_dma_pchan *pchan;
        struct virt_dma_desc *vd;
        int ret;

        lockdep_assert_held(&vchan->vc.lock);

        /* We need a pchan to do anything, so secure one if available */
        pchan = find_and_use_pchan(priv, vchan);
        if (!pchan)
                return -EBUSY;

        /*
         * Channel endpoints must not be repeated, so if this vchan
         * has already submitted some work, we can't do anything else
         */
        if (vchan->processing) {
                dev_dbg(chan2dev(&vchan->vc.chan),
                        "processing something to this endpoint already\n");
                ret = -EBUSY;
                goto release_pchan;
        }

        do {
                /* Figure out which contract we're working with today */
                vd = vchan_next_desc(&vchan->vc);
                if (!vd) {
                        dev_dbg(chan2dev(&vchan->vc.chan),
                                "No pending contract found");
                        ret = 0;
                        goto release_pchan;
                }

                contract = to_sun4i_dma_contract(vd);
                if (list_empty(&contract->demands)) {
                        /* The contract has been completed so mark it as such */
                        list_del(&contract->vd.node);
                        vchan_cookie_complete(&contract->vd);
                        dev_dbg(chan2dev(&vchan->vc.chan),
                                "Empty contract found and marked complete");
                }
        } while (list_empty(&contract->demands));

        /* Now find out what we need to do */
        promise = list_first_entry(&contract->demands,
                                   struct sun4i_dma_promise, list);
        vchan->processing = promise;

        /* ... and make it reality */
        if (promise) {
                vchan->contract = contract;
                vchan->pchan = pchan;
                set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
                configure_pchan(pchan, promise);
        }

        return 0;

release_pchan:
        release_pchan(priv, pchan);
        return ret;
}

static int sanitize_config(struct dma_slave_config *sconfig,
                           enum dma_transfer_direction direction)
{
        switch (direction) {
        case DMA_MEM_TO_DEV:
                if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
                    !sconfig->dst_maxburst)
                        return -EINVAL;

                if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
                        sconfig->src_addr_width = sconfig->dst_addr_width;

                if (!sconfig->src_maxburst)
                        sconfig->src_maxburst = sconfig->dst_maxburst;

                break;

        case DMA_DEV_TO_MEM:
                if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
                    !sconfig->src_maxburst)
                        return -EINVAL;

                if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
                        sconfig->dst_addr_width = sconfig->src_addr_width;

                if (!sconfig->dst_maxburst)
                        sconfig->dst_maxburst = sconfig->src_maxburst;

                break;

        default:
                return 0;
        }

        return 0;
}
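
/*
 * For example (illustrative): a client preparing a DMA_MEM_TO_DEV
 * transfer that only sets dst_addr_width and dst_maxburst gets the
 * source side mirrored from the destination parameters, so the promise
 * generators always see a fully specified config.
 */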

/*
 * Generate a promise, to be used in a normal DMA contract.
 *
 * A NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
                      size_t len, struct dma_slave_config *sconfig,
                      enum dma_transfer_direction direction)
{
        struct sun4i_dma_promise *promise;
        int ret;

        ret = sanitize_config(sconfig, direction);
        if (ret)
                return NULL;

        promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
        if (!promise)
                return NULL;

        promise->src = src;
        promise->dst = dest;
        promise->len = len;
        promise->cfg = SUN4I_DMA_CFG_LOADING |
                       SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

        dev_dbg(chan2dev(chan),
                "src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
                sconfig->src_maxburst, sconfig->dst_maxburst,
                sconfig->src_addr_width, sconfig->dst_addr_width);

        /* Source burst */
        ret = convert_burst(sconfig->src_maxburst);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

        /* Destination burst */
        ret = convert_burst(sconfig->dst_maxburst);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

        /* Source bus width */
        ret = convert_buswidth(sconfig->src_addr_width);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

        /* Destination bus width */
        ret = convert_buswidth(sconfig->dst_addr_width);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

        return promise;

fail:
        kfree(promise);
        return NULL;
}

/*
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
                      size_t len, struct dma_slave_config *sconfig)
{
        struct sun4i_dma_promise *promise;
        int ret;

        promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
        if (!promise)
                return NULL;

        promise->src = src;
        promise->dst = dest;
        promise->len = len;
        promise->cfg = SUN4I_DMA_CFG_LOADING |
                       SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

        /* Source burst */
        ret = convert_burst(sconfig->src_maxburst);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

        /* Destination burst */
        ret = convert_burst(sconfig->dst_maxburst);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

        /* Source bus width */
        ret = convert_buswidth(sconfig->src_addr_width);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

        /* Destination bus width */
        ret = convert_buswidth(sconfig->dst_addr_width);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

        return promise;

fail:
        kfree(promise);
        return NULL;
}

/*
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is represented as a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
        struct sun4i_dma_contract *contract;

        contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
        if (!contract)
                return NULL;

        INIT_LIST_HEAD(&contract->demands);
        INIT_LIST_HEAD(&contract->completed_demands);

        return contract;
}

/*
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
        struct sun4i_dma_promise *promise;

        promise = list_first_entry_or_null(&contract->demands,
                                           struct sun4i_dma_promise, list);
        if (!promise) {
                list_splice_init(&contract->completed_demands,
                                 &contract->demands);
                promise = list_first_entry(&contract->demands,
                                           struct sun4i_dma_promise, list);
        }

        return promise;
}
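
/*
 * Note: the list_splice_init() above is what makes cyclic transfers
 * endless. Once every promise has been executed and moved to
 * completed_demands, the whole batch is recycled back onto the demands
 * list and execution starts over from the first promise.
 */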

/*
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
        struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
        struct sun4i_dma_promise *promise, *tmp;

        /* Free all the demands and completed demands */
        list_for_each_entry_safe(promise, tmp, &contract->demands, list)
                kfree(promise);

        list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
                kfree(promise);

        kfree(contract);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                          dma_addr_t src, size_t len, unsigned long flags)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct dma_slave_config *sconfig = &vchan->cfg;
        struct sun4i_dma_promise *promise;
        struct sun4i_dma_contract *contract;

        contract = generate_dma_contract();
        if (!contract)
                return NULL;

        /*
         * We can only do the copy to bus aligned addresses, so
         * choose the best one so we get decent performance. We also
         * maximize the burst size for this same reason.
         */
        sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        sconfig->src_maxburst = 8;
        sconfig->dst_maxburst = 8;

        if (vchan->is_dedicated)
                promise = generate_ddma_promise(chan, src, dest, len, sconfig);
        else
                promise = generate_ndma_promise(chan, src, dest, len, sconfig,
                                                DMA_MEM_TO_MEM);

        if (!promise) {
                kfree(contract);
                return NULL;
        }

        /* Configure memcpy mode */
        if (vchan->is_dedicated) {
                promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
                                SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
        } else {
                promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
                                SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
        }

        /* Fill the contract with our only promise */
        list_add_tail(&promise->list, &contract->demands);

        /* And add it to the vchan */
        return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
                          size_t period_len, enum dma_transfer_direction dir,
                          unsigned long flags)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct dma_slave_config *sconfig = &vchan->cfg;
        struct sun4i_dma_promise *promise;
        struct sun4i_dma_contract *contract;
        dma_addr_t src, dest;
        u32 endpoints;
        int nr_periods, offset, plength, i;
        u8 ram_type, io_mode, linear_mode;

        if (!is_slave_direction(dir)) {
                dev_err(chan2dev(chan), "Invalid DMA direction\n");
                return NULL;
        }

        contract = generate_dma_contract();
        if (!contract)
                return NULL;

        contract->is_cyclic = 1;

        if (vchan->is_dedicated) {
                io_mode = SUN4I_DDMA_ADDR_MODE_IO;
                linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
                ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
        } else {
                io_mode = SUN4I_NDMA_ADDR_MODE_IO;
                linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
                ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
        }

        if (dir == DMA_MEM_TO_DEV) {
                src = buf;
                dest = sconfig->dst_addr;
                endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
                            SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
                            SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
                            SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
        } else {
                src = sconfig->src_addr;
                dest = buf;
                endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
                            SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
                            SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
                            SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
        }

        /*
         * We will be using half done interrupts to make two periods
         * out of a promise, so we need to program the DMA engine less
         * often
         */

        /*
         * The engine can interrupt on half-transfer, so we can use
         * this feature to program the engine half as often as if we
         * didn't use it (keep in mind the hardware doesn't support
         * linked lists).
         *
         * Say you have a set of periods (| marks the start/end, I for
         * interrupt, P for programming the engine to do a new
         * transfer), the easy but slow way would be to do
         *
         *      |---|---|---|---|   (periods / promises)
         *      P  I,P I,P I,P  I
         *
         * Using half transfer interrupts you can do
         *
         *      |-------|-------|   (promises as configured on hw)
         *      |---|---|---|---|   (periods)
         *      P   I  I,P  I   I
         *
         * Which requires half the engine programming for the same
         * functionality.
         *
         * This only works if two periods fit in a single promise. That will
         * always be the case for dedicated DMA, where the hardware has a much
         * larger maximum transfer size than advertised to clients.
         */
        if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
                period_len *= 2;
                contract->use_half_int = 1;
        }
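
        /*
         * Worked example (illustrative): len = 32 KiB with period_len = 8 KiB
         * on a normal channel doubles period_len to 16 KiB, so only two
         * promises get programmed; the half-done and end interrupts of each
         * promise then mark the four 8 KiB period boundaries.
         */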
        nr_periods = DIV_ROUND_UP(len, period_len);
        for (i = 0; i < nr_periods; i++) {
                /* Calculate the offset in the buffer and the length needed */
                offset = i * period_len;
                plength = min((len - offset), period_len);
                if (dir == DMA_MEM_TO_DEV)
                        src = buf + offset;
                else
                        dest = buf + offset;

                /* Make the promise */
                if (vchan->is_dedicated)
                        promise = generate_ddma_promise(chan, src, dest,
                                                        plength, sconfig);
                else
                        promise = generate_ndma_promise(chan, src, dest,
                                                        plength, sconfig, dir);

                if (!promise) {
                        /* TODO: should we free everything? */
                        return NULL;
                }
                promise->cfg |= endpoints;

                /* Then add it to the contract */
                list_add_tail(&promise->list, &contract->demands);
        }

        /* And add it to the vchan */
        return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        unsigned int sg_len, enum dma_transfer_direction dir,
                        unsigned long flags, void *context)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct dma_slave_config *sconfig = &vchan->cfg;
        struct sun4i_dma_promise *promise;
        struct sun4i_dma_contract *contract;
        u8 ram_type, io_mode, linear_mode;
        struct scatterlist *sg;
        dma_addr_t srcaddr, dstaddr;
        u32 endpoints, para;
        int i;

        if (!sgl)
                return NULL;

        if (!is_slave_direction(dir)) {
                dev_err(chan2dev(chan), "Invalid DMA direction\n");
                return NULL;
        }

        contract = generate_dma_contract();
        if (!contract)
                return NULL;

        if (vchan->is_dedicated) {
                io_mode = SUN4I_DDMA_ADDR_MODE_IO;
                linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
                ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
        } else {
                io_mode = SUN4I_NDMA_ADDR_MODE_IO;
                linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
                ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
        }

        if (dir == DMA_MEM_TO_DEV)
                endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
                            SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
                            SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
                            SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
        else
                endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
                            SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
                            SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
                            SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

        for_each_sg(sgl, sg, sg_len, i) {
                /* Figure out addresses */
                if (dir == DMA_MEM_TO_DEV) {
                        srcaddr = sg_dma_address(sg);
                        dstaddr = sconfig->dst_addr;
                } else {
                        srcaddr = sconfig->src_addr;
                        dstaddr = sg_dma_address(sg);
                }

                /*
                 * These are the magic DMA engine timings that keep SPI going.
                 * I haven't seen any interface on DMAEngine to configure
                 * timings, and so far they seem to work for everything we
                 * support, so I've kept them here. I don't know if other
                 * devices need different timings because, as usual, we only
                 * have the "para" bitfield meanings, but no comment on what
                 * the values should be when doing a certain operation :|
                 */
                para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;

                /* And make a suitable promise */
                if (vchan->is_dedicated)
                        promise = generate_ddma_promise(chan, srcaddr, dstaddr,
                                                        sg_dma_len(sg),
                                                        sconfig);
                else
                        promise = generate_ndma_promise(chan, srcaddr, dstaddr,
                                                        sg_dma_len(sg),
                                                        sconfig, dir);

                if (!promise)
                        return NULL; /* TODO: should we free everything? */

                promise->cfg |= endpoints;
                promise->para = para;

                /* Then add it to the contract */
                list_add_tail(&promise->list, &contract->demands);
        }

        /*
         * Once we've got all the promises ready, add the contract
         * to the pending list on the vchan
         */
        return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
        struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct sun4i_dma_pchan *pchan = vchan->pchan;
        LIST_HEAD(head);
        unsigned long flags;

        spin_lock_irqsave(&vchan->vc.lock, flags);
        vchan_get_all_descriptors(&vchan->vc, &head);
        spin_unlock_irqrestore(&vchan->vc.lock, flags);

        /*
         * Clearing the configuration register will halt the pchan. Interrupts
         * may still trigger, so don't forget to disable them.
         */
        if (pchan) {
                if (pchan->is_dedicated)
                        writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
                else
                        writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
                set_pchan_interrupt(priv, pchan, 0, 0);
                release_pchan(priv, pchan);
        }

        spin_lock_irqsave(&vchan->vc.lock, flags);
        /* Clear these so the vchan is usable again */
        vchan->processing = NULL;
        vchan->pchan = NULL;
        spin_unlock_irqrestore(&vchan->vc.lock, flags);

        vchan_dma_desc_free_list(&vchan->vc, &head);

        return 0;
}

static int sun4i_dma_config(struct dma_chan *chan,
                            struct dma_slave_config *config)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

        memcpy(&vchan->cfg, config, sizeof(*config));

        return 0;
}

static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
                                           struct of_dma *ofdma)
{
        struct sun4i_dma_dev *priv = ofdma->of_dma_data;
        struct sun4i_dma_vchan *vchan;
        struct dma_chan *chan;
        u8 is_dedicated = dma_spec->args[0];
        u8 endpoint = dma_spec->args[1];

        /* Check if type is Normal or Dedicated */
        if (is_dedicated != 0 && is_dedicated != 1)
                return NULL;

        /* Make sure the endpoint looks sane */
        if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
            (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
                return NULL;

        chan = dma_get_any_slave_channel(&priv->slave);
        if (!chan)
                return NULL;

        /* Assign the endpoint to the vchan */
        vchan = to_sun4i_dma_vchan(chan);
        vchan->is_dedicated = is_dedicated;
        vchan->endpoint = endpoint;

        return chan;
}
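
/*
 * Illustrative (hypothetical) consumer node for the two-cell binding
 * parsed above; the first cell selects normal (0) or dedicated (1) DMA
 * and the second cell is the endpoint (DRQ) number:
 *
 *     spi0: spi@1c05000 {
 *             ...
 *             dmas = <&dma 1 27>, <&dma 1 26>;
 *             dma-names = "rx", "tx";
 *     };
 */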

static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
                                           dma_cookie_t cookie,
                                           struct dma_tx_state *state)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct sun4i_dma_pchan *pchan = vchan->pchan;
        struct sun4i_dma_contract *contract;
        struct sun4i_dma_promise *promise;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(chan, cookie, state);
        if (!state || (ret == DMA_COMPLETE))
                return ret;

        spin_lock_irqsave(&vchan->vc.lock, flags);
        vd = vchan_find_desc(&vchan->vc, cookie);
        if (!vd)
                goto exit;
        contract = to_sun4i_dma_contract(vd);

        list_for_each_entry(promise, &contract->demands, list)
                bytes += promise->len;

        /*
         * The hardware is configured to return the remaining byte
         * quantity. If possible, replace the first listed element's
         * full size with the actual remaining amount
         */
        promise = list_first_entry_or_null(&contract->demands,
                                           struct sun4i_dma_promise, list);
        if (promise && pchan) {
                bytes -= promise->len;
                if (pchan->is_dedicated)
                        bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
                else
                        bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
        }

exit:
        dma_set_residue(state, bytes);
        spin_unlock_irqrestore(&vchan->vc.lock, flags);

        return ret;
}

static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
        struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        unsigned long flags;

        spin_lock_irqsave(&vchan->vc.lock, flags);

        /*
         * If there are pending transactions for this vchan, push one of
         * them into the engine to get the ball rolling.
         */
        if (vchan_issue_pending(&vchan->vc))
                __execute_vchan_pending(priv, vchan);

        spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
        struct sun4i_dma_dev *priv = dev_id;
        struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
        struct sun4i_dma_vchan *vchan;
        struct sun4i_dma_contract *contract;
        struct sun4i_dma_promise *promise;
        unsigned long pendirq, irqs, disableirqs;
        int bit, i, free_room, allow_mitigation = 1;

        pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

        disableirqs = 0;
        free_room = 0;

        for_each_set_bit(bit, &pendirq, 32) {
                pchan = &pchans[bit >> 1];
                vchan = pchan->vchan;
                if (!vchan) /* a terminated channel may still interrupt */
                        continue;
                contract = vchan->contract;

                /*
                 * Disable the IRQ and free the pchan if it's an end
                 * interrupt (odd bit)
                 */
                if (bit & 1) {
                        spin_lock(&vchan->vc.lock);

                        /*
                         * Move the promise into the completed list now that
                         * we're done with it
                         */
                        list_move_tail(&vchan->processing->list,
                                       &contract->completed_demands);

                        /*
                         * Cyclic DMA transfers are special:
                         * - There's always something we can dispatch
                         * - We need to run the callback
                         * - Latency is very important, as this is used by audio
                         * We therefore just cycle through the list and dispatch
                         * whatever we have here, reusing the pchan. There's
                         * no need to run the thread after this.
                         *
                         * For non-cyclic transfers we need to look around,
                         * so we can program some more work, or notify the
                         * client that their transfers have been completed.
                         */
                        if (contract->is_cyclic) {
                                promise = get_next_cyclic_promise(contract);
                                vchan->processing = promise;
                                configure_pchan(pchan, promise);
                                vchan_cyclic_callback(&contract->vd);
                        } else {
                                vchan->processing = NULL;
                                vchan->pchan = NULL;

                                free_room = 1;
                                disableirqs |= BIT(bit);
                                release_pchan(priv, pchan);
                        }

                        spin_unlock(&vchan->vc.lock);
                } else {
                        /* Half done interrupt */
                        if (contract->is_cyclic)
                                vchan_cyclic_callback(&contract->vd);
                        else
                                disableirqs |= BIT(bit);
                }
        }

        /* Disable the IRQs for events we handled */
        spin_lock(&priv->lock);
        irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
        writel_relaxed(irqs & ~disableirqs,
                       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
        spin_unlock(&priv->lock);

        /* Writing 1 to the pending field will clear the pending interrupt */
        writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

        /*
         * If a pchan was freed, we may be able to schedule something else,
         * so have a look around
         */
        if (free_room) {
                for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
                        vchan = &priv->vchans[i];
                        spin_lock(&vchan->vc.lock);
                        __execute_vchan_pending(priv, vchan);
                        spin_unlock(&vchan->vc.lock);
                }
        }

        /*
         * Handle newer interrupts if some showed up, but only do it once
         * to avoid too long a loop
         */
        if (allow_mitigation) {
                pendirq = readl_relaxed(priv->base +
                                        SUN4I_DMA_IRQ_PENDING_STATUS_REG);
                if (pendirq) {
                        allow_mitigation = 0;
                        goto handle_pending;
                }
        }

        return IRQ_HANDLED;
}

static int sun4i_dma_probe(struct platform_device *pdev)
{
        struct sun4i_dma_dev *priv;
        struct resource *res;
        int i, j, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk)) {
                dev_err(&pdev->dev, "No clock specified\n");
                return PTR_ERR(priv->clk);
        }

        platform_set_drvdata(pdev, priv);
        spin_lock_init(&priv->lock);

        dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);

        dma_cap_zero(priv->slave.cap_mask);
        dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
        dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

        INIT_LIST_HEAD(&priv->slave.channels);
        priv->slave.device_free_chan_resources = sun4i_dma_free_chan_resources;
        priv->slave.device_tx_status = sun4i_dma_tx_status;
        priv->slave.device_issue_pending = sun4i_dma_issue_pending;
        priv->slave.device_prep_slave_sg = sun4i_dma_prep_slave_sg;
        priv->slave.device_prep_dma_memcpy = sun4i_dma_prep_dma_memcpy;
        priv->slave.device_prep_dma_cyclic = sun4i_dma_prep_dma_cyclic;
        priv->slave.device_config = sun4i_dma_config;
        priv->slave.device_terminate_all = sun4i_dma_terminate_all;
        priv->slave.copy_align = 2;
        priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                                      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
                                      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                                      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
                                      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        priv->slave.directions = BIT(DMA_DEV_TO_MEM) |
                                 BIT(DMA_MEM_TO_DEV);
        priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        priv->slave.dev = &pdev->dev;

        priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
                                    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
        priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
                                    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
        if (!priv->vchans || !priv->pchans)
                return -ENOMEM;

        /*
         * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
         * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
         * dedicated ones
         */
        for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
                priv->pchans[i].base = priv->base +
                        SUN4I_NDMA_CHANNEL_REG_BASE(i);

        for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
                priv->pchans[i].base = priv->base +
                        SUN4I_DDMA_CHANNEL_REG_BASE(j);
                priv->pchans[i].is_dedicated = 1;
        }

        for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
                struct sun4i_dma_vchan *vchan = &priv->vchans[i];

                spin_lock_init(&vchan->vc.lock);
                vchan->vc.desc_free = sun4i_dma_free_contract;
                vchan_init(&vchan->vc, &priv->slave);
        }

        ret = clk_prepare_enable(priv->clk);
        if (ret) {
                dev_err(&pdev->dev, "Couldn't enable the clock\n");
                return ret;
        }

        /*
         * Make sure the IRQs are all disabled and accounted for. The bootloader
         * likes to leave these dirty
         */
        writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
        writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

        ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
                               0, dev_name(&pdev->dev), priv);
        if (ret) {
                dev_err(&pdev->dev, "Cannot request IRQ\n");
                goto err_clk_disable;
        }

        ret = dma_async_device_register(&priv->slave);
        if (ret) {
                dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
                goto err_clk_disable;
        }

        ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
                                         priv);
        if (ret) {
                dev_err(&pdev->dev, "of_dma_controller_register failed\n");
                goto err_dma_unregister;
        }

        dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

        return 0;

err_dma_unregister:
        dma_async_device_unregister(&priv->slave);
err_clk_disable:
        clk_disable_unprepare(priv->clk);
        return ret;
}

static int sun4i_dma_remove(struct platform_device *pdev)
{
        struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

        /* Disable IRQ so no more work is scheduled */
        disable_irq(priv->irq);

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&priv->slave);

        clk_disable_unprepare(priv->clk);

        return 0;
}

static const struct of_device_id sun4i_dma_match[] = {
        { .compatible = "allwinner,sun4i-a10-dma" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);

static struct platform_driver sun4i_dma_driver = {
        .probe = sun4i_dma_probe,
        .remove = sun4i_dma_remove,
        .driver = {
                .name = "sun4i-dma",
                .of_match_table = sun4i_dma_match,
        },
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
MODULE_AUTHOR("Emilio López <[email protected]>");
MODULE_LICENSE("GPL");