edma.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * TI EDMA DMA engine driver
  4. *
  5. * Copyright 2012 Texas Instruments
  6. */
  7. #include <linux/dmaengine.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/bitmap.h>
  10. #include <linux/err.h>
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/list.h>
  14. #include <linux/module.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/slab.h>
  17. #include <linux/spinlock.h>
  18. #include <linux/of.h>
  19. #include <linux/of_dma.h>
  20. #include <linux/of_irq.h>
  21. #include <linux/of_address.h>
  22. #include <linux/of_device.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/platform_data/edma.h>
  25. #include "../dmaengine.h"
  26. #include "../virt-dma.h"
  27. /* Offsets matching "struct edmacc_param" */
  28. #define PARM_OPT 0x00
  29. #define PARM_SRC 0x04
  30. #define PARM_A_B_CNT 0x08
  31. #define PARM_DST 0x0c
  32. #define PARM_SRC_DST_BIDX 0x10
  33. #define PARM_LINK_BCNTRLD 0x14
  34. #define PARM_SRC_DST_CIDX 0x18
  35. #define PARM_CCNT 0x1c
  36. #define PARM_SIZE 0x20
  37. /* Offsets for EDMA CC global channel registers and their shadows */
  38. #define SH_ER 0x00 /* 64 bits */
  39. #define SH_ECR 0x08 /* 64 bits */
  40. #define SH_ESR 0x10 /* 64 bits */
  41. #define SH_CER 0x18 /* 64 bits */
  42. #define SH_EER 0x20 /* 64 bits */
  43. #define SH_EECR 0x28 /* 64 bits */
  44. #define SH_EESR 0x30 /* 64 bits */
  45. #define SH_SER 0x38 /* 64 bits */
  46. #define SH_SECR 0x40 /* 64 bits */
  47. #define SH_IER 0x50 /* 64 bits */
  48. #define SH_IECR 0x58 /* 64 bits */
  49. #define SH_IESR 0x60 /* 64 bits */
  50. #define SH_IPR 0x68 /* 64 bits */
  51. #define SH_ICR 0x70 /* 64 bits */
  52. #define SH_IEVAL 0x78
  53. #define SH_QER 0x80
  54. #define SH_QEER 0x84
  55. #define SH_QEECR 0x88
  56. #define SH_QEESR 0x8c
  57. #define SH_QSER 0x90
  58. #define SH_QSECR 0x94
  59. #define SH_SIZE 0x200
  60. /* Offsets for EDMA CC global registers */
  61. #define EDMA_REV 0x0000
  62. #define EDMA_CCCFG 0x0004
  63. #define EDMA_QCHMAP 0x0200 /* 8 registers */
  64. #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */
  65. #define EDMA_QDMAQNUM 0x0260
  66. #define EDMA_QUETCMAP 0x0280
  67. #define EDMA_QUEPRI 0x0284
  68. #define EDMA_EMR 0x0300 /* 64 bits */
  69. #define EDMA_EMCR 0x0308 /* 64 bits */
  70. #define EDMA_QEMR 0x0310
  71. #define EDMA_QEMCR 0x0314
  72. #define EDMA_CCERR 0x0318
  73. #define EDMA_CCERRCLR 0x031c
  74. #define EDMA_EEVAL 0x0320
  75. #define EDMA_DRAE 0x0340 /* 4 x 64 bits*/
  76. #define EDMA_QRAE 0x0380 /* 4 registers */
  77. #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */
  78. #define EDMA_QSTAT 0x0600 /* 2 registers */
  79. #define EDMA_QWMTHRA 0x0620
  80. #define EDMA_QWMTHRB 0x0624
  81. #define EDMA_CCSTAT 0x0640
  82. #define EDMA_M 0x1000 /* global channel registers */
  83. #define EDMA_ECR 0x1008
  84. #define EDMA_ECRH 0x100C
  85. #define EDMA_SHADOW0 0x2000 /* 4 shadow regions */
  86. #define EDMA_PARM 0x4000 /* PaRAM entries */
  87. #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
  88. #define EDMA_DCHMAP 0x0100 /* 64 registers */
  89. /* CCCFG register */
  90. #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
  91. #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */
  92. #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
  93. #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
  94. #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
  95. #define CHMAP_EXIST BIT(24)
  96. /* CCSTAT register */
  97. #define EDMA_CCSTAT_ACTV BIT(4)
  98. /*
  99. * Max of 20 segments per channel to conserve PaRAM slots
100. * Also note that MAX_NR_SG should be at least the number of periods
101. * that are required for ASoC, otherwise DMA prep calls will
102. * fail. Today davinci-pcm is the only user of this driver and
103. * requires at least 17 slots, so we set the default to 20.
  104. */
  105. #define MAX_NR_SG 20
  106. #define EDMA_MAX_SLOTS MAX_NR_SG
  107. #define EDMA_DESCRIPTORS 16
  108. #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
  109. #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
  110. #define EDMA_CONT_PARAMS_ANY 1001
  111. #define EDMA_CONT_PARAMS_FIXED_EXACT 1002
  112. #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
  113. /*
  114. * 64bit array registers are split into two 32bit registers:
  115. * reg0: channel/event 0-31
  116. * reg1: channel/event 32-63
  117. *
118. * bit 5 of the channel number selects the array index (0/1)
119. * bits 0-4 (0x1f) give the bit offset within the register
  120. */
  121. #define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5)
  122. #define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f))
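/*
 * Worked example of the two macros above (illustrative): channel 35 maps
 * to register index 1 (35 >> 5) and bit 3 (BIT(35 & 0x1f)), i.e. bit 3 of
 * the second 32-bit half of each 64-bit register pair.
 */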
  123. /* PaRAM slots are laid out like this */
  124. struct edmacc_param {
  125. u32 opt;
  126. u32 src;
  127. u32 a_b_cnt;
  128. u32 dst;
  129. u32 src_dst_bidx;
  130. u32 link_bcntrld;
  131. u32 src_dst_cidx;
  132. u32 ccnt;
  133. } __packed;
  134. /* fields in edmacc_param.opt */
  135. #define SAM BIT(0)
  136. #define DAM BIT(1)
  137. #define SYNCDIM BIT(2)
  138. #define STATIC BIT(3)
  139. #define EDMA_FWID (0x07 << 8)
  140. #define TCCMODE BIT(11)
  141. #define EDMA_TCC(t) ((t) << 12)
  142. #define TCINTEN BIT(20)
  143. #define ITCINTEN BIT(21)
  144. #define TCCHEN BIT(22)
  145. #define ITCCHEN BIT(23)
  146. struct edma_pset {
  147. u32 len;
  148. dma_addr_t addr;
  149. struct edmacc_param param;
  150. };
  151. struct edma_desc {
  152. struct virt_dma_desc vdesc;
  153. struct list_head node;
  154. enum dma_transfer_direction direction;
  155. int cyclic;
  156. bool polled;
  157. int absync;
  158. int pset_nr;
  159. struct edma_chan *echan;
  160. int processed;
  161. /*
  162. * The following 4 elements are used for residue accounting.
  163. *
  164. * - processed_stat: the number of SG elements we have traversed
  165. * so far to cover accounting. This is updated directly to processed
  166. * during edma_callback and is always <= processed, because processed
167. * refers to the number of pending transfers (programmed to the EDMA
168. * controller), whereas processed_stat tracks the number of transfers
  169. * accounted for so far.
  170. *
  171. * - residue: The amount of bytes we have left to transfer for this desc
  172. *
  173. * - residue_stat: The residue in bytes of data we have covered
  174. * so far for accounting. This is updated directly to residue
  175. * during callbacks to keep it current.
  176. *
  177. * - sg_len: Tracks the length of the current intermediate transfer,
  178. * this is required to update the residue during intermediate transfer
  179. * completion callback.
  180. */
  181. int processed_stat;
  182. u32 sg_len;
  183. u32 residue;
  184. u32 residue_stat;
  185. struct edma_pset pset[];
  186. };
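/*
 * Illustration of the residue bookkeeping above (numbers are only an
 * example): a 40-element SG list is issued in two batches of 20. When the
 * first batch completes, residue drops by that batch's sg_len (the sum of
 * its 20 element lengths) and residue_stat/processed_stat are advanced to
 * match, so the residue reported via tx_status stays current while the
 * second batch runs.
 */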
  187. struct edma_cc;
  188. struct edma_tc {
  189. struct device_node *node;
  190. u16 id;
  191. };
  192. struct edma_chan {
  193. struct virt_dma_chan vchan;
  194. struct list_head node;
  195. struct edma_desc *edesc;
  196. struct edma_cc *ecc;
  197. struct edma_tc *tc;
  198. int ch_num;
  199. bool alloced;
  200. bool hw_triggered;
  201. int slot[EDMA_MAX_SLOTS];
  202. int missed;
  203. struct dma_slave_config cfg;
  204. };
  205. struct edma_cc {
  206. struct device *dev;
  207. struct edma_soc_info *info;
  208. void __iomem *base;
  209. int id;
  210. bool legacy_mode;
  211. /* eDMA3 resource information */
  212. unsigned num_channels;
  213. unsigned num_qchannels;
  214. unsigned num_region;
  215. unsigned num_slots;
  216. unsigned num_tc;
  217. bool chmap_exist;
  218. enum dma_event_q default_queue;
  219. unsigned int ccint;
  220. unsigned int ccerrint;
  221. /*
  222. * The slot_inuse bit for each PaRAM slot is clear unless the slot is
  223. * in use by Linux or if it is allocated to be used by DSP.
  224. */
  225. unsigned long *slot_inuse;
  226. /*
  227. * For tracking reserved channels used by DSP.
  228. * If the bit is cleared, the channel is allocated to be used by DSP
  229. * and Linux must not touch it.
  230. */
  231. unsigned long *channels_mask;
  232. struct dma_device dma_slave;
  233. struct dma_device *dma_memcpy;
  234. struct edma_chan *slave_chans;
  235. struct edma_tc *tc_list;
  236. int dummy_slot;
  237. };
  238. /* dummy param set used to (re)initialize parameter RAM slots */
  239. static const struct edmacc_param dummy_paramset = {
  240. .link_bcntrld = 0xffff,
  241. .ccnt = 1,
  242. };
  243. #define EDMA_BINDING_LEGACY 0
  244. #define EDMA_BINDING_TPCC 1
  245. static const u32 edma_binding_type[] = {
  246. [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
  247. [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
  248. };
  249. static const struct of_device_id edma_of_ids[] = {
  250. {
  251. .compatible = "ti,edma3",
  252. .data = &edma_binding_type[EDMA_BINDING_LEGACY],
  253. },
  254. {
  255. .compatible = "ti,edma3-tpcc",
  256. .data = &edma_binding_type[EDMA_BINDING_TPCC],
  257. },
  258. {}
  259. };
  260. MODULE_DEVICE_TABLE(of, edma_of_ids);
  261. static const struct of_device_id edma_tptc_of_ids[] = {
  262. { .compatible = "ti,edma3-tptc", },
  263. {}
  264. };
  265. MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
  266. static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
  267. {
  268. return (unsigned int)__raw_readl(ecc->base + offset);
  269. }
  270. static inline void edma_write(struct edma_cc *ecc, int offset, int val)
  271. {
  272. __raw_writel(val, ecc->base + offset);
  273. }
  274. static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
  275. unsigned or)
  276. {
  277. unsigned val = edma_read(ecc, offset);
  278. val &= and;
  279. val |= or;
  280. edma_write(ecc, offset, val);
  281. }
  282. static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
  283. {
  284. unsigned val = edma_read(ecc, offset);
  285. val &= and;
  286. edma_write(ecc, offset, val);
  287. }
  288. static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
  289. {
  290. unsigned val = edma_read(ecc, offset);
  291. val |= or;
  292. edma_write(ecc, offset, val);
  293. }
  294. static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
  295. int i)
  296. {
  297. return edma_read(ecc, offset + (i << 2));
  298. }
  299. static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
  300. unsigned val)
  301. {
  302. edma_write(ecc, offset + (i << 2), val);
  303. }
  304. static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
  305. unsigned and, unsigned or)
  306. {
  307. edma_modify(ecc, offset + (i << 2), and, or);
  308. }
  309. static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
  310. unsigned or)
  311. {
  312. edma_or(ecc, offset + ((i * 2 + j) << 2), or);
  313. }
  314. static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
  315. int j, unsigned val)
  316. {
  317. edma_write(ecc, offset + ((i * 2 + j) << 2), val);
  318. }
  319. static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
  320. int offset, int i)
  321. {
  322. return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
  323. }
  324. static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
  325. unsigned val)
  326. {
  327. edma_write(ecc, EDMA_SHADOW0 + offset, val);
  328. }
  329. static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
  330. int i, unsigned val)
  331. {
  332. edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
  333. }
  334. static inline void edma_param_modify(struct edma_cc *ecc, int offset,
  335. int param_no, unsigned and, unsigned or)
  336. {
  337. edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
  338. }
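/*
 * Sketch of how the PaRAM helpers compose (illustrative call, slot number
 * is hypothetical): to set only the TCINTEN bit of slot 5's OPT word one
 * could do
 *
 *	edma_param_modify(ecc, PARM_OPT, 5, ~0, TCINTEN);
 *
 * i.e. the "and" mask keeps the existing bits and "or" sets the new one.
 */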
  339. static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
  340. int priority)
  341. {
  342. int bit = queue_no * 4;
  343. edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
  344. }
  345. static void edma_set_chmap(struct edma_chan *echan, int slot)
  346. {
  347. struct edma_cc *ecc = echan->ecc;
  348. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  349. if (ecc->chmap_exist) {
  350. slot = EDMA_CHAN_SLOT(slot);
  351. edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
  352. }
  353. }
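/*
 * Note on edma_set_chmap(): the (slot << 5) places the PaRAM entry number
 * in the PAENTRY field of DCHMAPn, which per the EDMA3 CC documentation
 * appears to occupy bits 13:5. Only controllers reporting CHMAP_EXIST have
 * this per-channel mapping register.
 */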
  354. static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
  355. {
  356. struct edma_cc *ecc = echan->ecc;
  357. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  358. int idx = EDMA_REG_ARRAY_INDEX(channel);
  359. int ch_bit = EDMA_CHANNEL_BIT(channel);
  360. if (enable) {
  361. edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
  362. edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
  363. } else {
  364. edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
  365. }
  366. }
  367. /*
  368. * paRAM slot management functions
  369. */
  370. static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
  371. const struct edmacc_param *param)
  372. {
  373. slot = EDMA_CHAN_SLOT(slot);
  374. if (slot >= ecc->num_slots)
  375. return;
  376. memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
  377. }
  378. static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
  379. struct edmacc_param *param)
  380. {
  381. slot = EDMA_CHAN_SLOT(slot);
  382. if (slot >= ecc->num_slots)
  383. return -EINVAL;
  384. memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
  385. return 0;
  386. }
  387. /**
  388. * edma_alloc_slot - allocate DMA parameter RAM
  389. * @ecc: pointer to edma_cc struct
  390. * @slot: specific slot to allocate; negative for "any unused slot"
  391. *
  392. * This allocates a parameter RAM slot, initializing it to hold a
  393. * dummy transfer. Slots allocated using this routine have not been
  394. * mapped to a hardware DMA channel, and will normally be used by
  395. * linking to them from a slot associated with a DMA channel.
  396. *
  397. * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
  398. * slots may be allocated on behalf of DSP firmware.
  399. *
  400. * Returns the number of the slot, else negative errno.
  401. */
  402. static int edma_alloc_slot(struct edma_cc *ecc, int slot)
  403. {
  404. if (slot >= 0) {
  405. slot = EDMA_CHAN_SLOT(slot);
  406. /* Requesting entry paRAM slot for a HW triggered channel. */
  407. if (ecc->chmap_exist && slot < ecc->num_channels)
  408. slot = EDMA_SLOT_ANY;
  409. }
  410. if (slot < 0) {
  411. if (ecc->chmap_exist)
  412. slot = 0;
  413. else
  414. slot = ecc->num_channels;
  415. for (;;) {
  416. slot = find_next_zero_bit(ecc->slot_inuse,
  417. ecc->num_slots,
  418. slot);
  419. if (slot == ecc->num_slots)
  420. return -ENOMEM;
  421. if (!test_and_set_bit(slot, ecc->slot_inuse))
  422. break;
  423. }
  424. } else if (slot >= ecc->num_slots) {
  425. return -EINVAL;
  426. } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
  427. return -EBUSY;
  428. }
  429. edma_write_slot(ecc, slot, &dummy_paramset);
  430. return EDMA_CTLR_CHAN(ecc->id, slot);
  431. }
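/*
 * Usage sketch for edma_alloc_slot() (hypothetical caller, error handling
 * elided); the controller id is encoded into the return value via
 * EDMA_CTLR_CHAN():
 *
 *	int slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
 *
 *	if (slot < 0)
 *		return slot;
 *	...
 *	edma_free_slot(ecc, slot);
 */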
  432. static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
  433. {
  434. slot = EDMA_CHAN_SLOT(slot);
  435. if (slot >= ecc->num_slots)
  436. return;
  437. edma_write_slot(ecc, slot, &dummy_paramset);
  438. clear_bit(slot, ecc->slot_inuse);
  439. }
  440. /**
  441. * edma_link - link one parameter RAM slot to another
  442. * @ecc: pointer to edma_cc struct
  443. * @from: parameter RAM slot originating the link
  444. * @to: parameter RAM slot which is the link target
  445. *
  446. * The originating slot should not be part of any active DMA transfer.
  447. */
  448. static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
  449. {
  450. if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
  451. dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");
  452. from = EDMA_CHAN_SLOT(from);
  453. to = EDMA_CHAN_SLOT(to);
  454. if (from >= ecc->num_slots || to >= ecc->num_slots)
  455. return;
  456. edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
  457. PARM_OFFSET(to));
  458. }
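/*
 * Note on the link field written above: the low 16 bits of LINK_BCNTRLD
 * hold the PaRAM byte offset of the next slot, and 0xffff (as used in
 * dummy_paramset) means "no link". edma_execute() below links the last
 * slot of a batch either back into the ring (cyclic) or to the dummy slot
 * to terminate the chain.
 */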
  459. /**
  460. * edma_get_position - returns the current transfer point
  461. * @ecc: pointer to edma_cc struct
  462. * @slot: parameter RAM slot being examined
  463. * @dst: true selects the dest position, false the source
  464. *
  465. * Returns the position of the current active slot
  466. */
  467. static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
  468. bool dst)
  469. {
  470. u32 offs;
  471. slot = EDMA_CHAN_SLOT(slot);
  472. offs = PARM_OFFSET(slot);
  473. offs += dst ? PARM_DST : PARM_SRC;
  474. return edma_read(ecc, offs);
  475. }
  476. /*
  477. * Channels with event associations will be triggered by their hardware
  478. * events, and channels without such associations will be triggered by
  479. * software. (At this writing there is no interface for using software
  480. * triggers except with channels that don't support hardware triggers.)
  481. */
  482. static void edma_start(struct edma_chan *echan)
  483. {
  484. struct edma_cc *ecc = echan->ecc;
  485. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  486. int idx = EDMA_REG_ARRAY_INDEX(channel);
  487. int ch_bit = EDMA_CHANNEL_BIT(channel);
  488. if (!echan->hw_triggered) {
  489. /* EDMA channels without event association */
  490. dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
  491. edma_shadow0_read_array(ecc, SH_ESR, idx));
  492. edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
  493. } else {
  494. /* EDMA channel with event association */
  495. dev_dbg(ecc->dev, "ER%d %08x\n", idx,
  496. edma_shadow0_read_array(ecc, SH_ER, idx));
  497. /* Clear any pending event or error */
  498. edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
  499. edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
  500. /* Clear any SER */
  501. edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
  502. edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
  503. dev_dbg(ecc->dev, "EER%d %08x\n", idx,
  504. edma_shadow0_read_array(ecc, SH_EER, idx));
  505. }
  506. }
  507. static void edma_stop(struct edma_chan *echan)
  508. {
  509. struct edma_cc *ecc = echan->ecc;
  510. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  511. int idx = EDMA_REG_ARRAY_INDEX(channel);
  512. int ch_bit = EDMA_CHANNEL_BIT(channel);
  513. edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
  514. edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
  515. edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
  516. edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
  517. /* clear possibly pending completion interrupt */
  518. edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
  519. dev_dbg(ecc->dev, "EER%d %08x\n", idx,
  520. edma_shadow0_read_array(ecc, SH_EER, idx));
  521. /* REVISIT: consider guarding against inappropriate event
  522. * chaining by overwriting with dummy_paramset.
  523. */
  524. }
  525. /*
  526. * Temporarily disable EDMA hardware events on the specified channel,
  527. * preventing them from triggering new transfers
  528. */
  529. static void edma_pause(struct edma_chan *echan)
  530. {
  531. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  532. edma_shadow0_write_array(echan->ecc, SH_EECR,
  533. EDMA_REG_ARRAY_INDEX(channel),
  534. EDMA_CHANNEL_BIT(channel));
  535. }
  536. /* Re-enable EDMA hardware events on the specified channel. */
  537. static void edma_resume(struct edma_chan *echan)
  538. {
  539. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  540. edma_shadow0_write_array(echan->ecc, SH_EESR,
  541. EDMA_REG_ARRAY_INDEX(channel),
  542. EDMA_CHANNEL_BIT(channel));
  543. }
  544. static void edma_trigger_channel(struct edma_chan *echan)
  545. {
  546. struct edma_cc *ecc = echan->ecc;
  547. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  548. int idx = EDMA_REG_ARRAY_INDEX(channel);
  549. int ch_bit = EDMA_CHANNEL_BIT(channel);
  550. edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
  551. dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
  552. edma_shadow0_read_array(ecc, SH_ESR, idx));
  553. }
  554. static void edma_clean_channel(struct edma_chan *echan)
  555. {
  556. struct edma_cc *ecc = echan->ecc;
  557. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  558. int idx = EDMA_REG_ARRAY_INDEX(channel);
  559. int ch_bit = EDMA_CHANNEL_BIT(channel);
  560. dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
  561. edma_read_array(ecc, EDMA_EMR, idx));
  562. edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
  563. /* Clear the corresponding EMR bits */
  564. edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
  565. /* Clear any SER */
  566. edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
  567. edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
  568. }
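/*
 * The CCERRCLR write above clears what appear to be, per the EDMA3 CC
 * register description, the TCCERR flag (bit 16) and the event queue
 * threshold-exceeded flags QTHRXCD1/QTHRXCD0 (bits 1 and 0); the bit
 * names are taken from the hardware documentation.
 */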
  569. /* Move channel to a specific event queue */
  570. static void edma_assign_channel_eventq(struct edma_chan *echan,
  571. enum dma_event_q eventq_no)
  572. {
  573. struct edma_cc *ecc = echan->ecc;
  574. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  575. int bit = (channel & 0x7) * 4;
  576. /* default to low priority queue */
  577. if (eventq_no == EVENTQ_DEFAULT)
  578. eventq_no = ecc->default_queue;
  579. if (eventq_no >= ecc->num_tc)
  580. return;
  581. eventq_no &= 7;
  582. edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
  583. eventq_no << bit);
  584. }
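/*
 * Worked example for the DMAQNUM programming above (illustrative):
 * channel 10 uses DMAQNUM[1] (10 >> 3) and bit offset 8 ((10 & 0x7) * 4),
 * so its 3-bit event queue number lands in bits 10:8 of that register.
 */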
  585. static int edma_alloc_channel(struct edma_chan *echan,
  586. enum dma_event_q eventq_no)
  587. {
  588. struct edma_cc *ecc = echan->ecc;
  589. int channel = EDMA_CHAN_SLOT(echan->ch_num);
  590. if (!test_bit(echan->ch_num, ecc->channels_mask)) {
  591. dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n",
  592. echan->ch_num);
  593. return -EINVAL;
  594. }
  595. /* ensure access through shadow region 0 */
  596. edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
  597. EDMA_CHANNEL_BIT(channel));
  598. /* ensure no events are pending */
  599. edma_stop(echan);
  600. edma_setup_interrupt(echan, true);
  601. edma_assign_channel_eventq(echan, eventq_no);
  602. return 0;
  603. }
  604. static void edma_free_channel(struct edma_chan *echan)
  605. {
  606. /* ensure no events are pending */
  607. edma_stop(echan);
  608. /* REVISIT should probably take out of shadow region 0 */
  609. edma_setup_interrupt(echan, false);
  610. }
  611. static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
  612. {
  613. return container_of(c, struct edma_chan, vchan.chan);
  614. }
  615. static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
  616. {
  617. return container_of(tx, struct edma_desc, vdesc.tx);
  618. }
  619. static void edma_desc_free(struct virt_dma_desc *vdesc)
  620. {
  621. kfree(container_of(vdesc, struct edma_desc, vdesc));
  622. }
  623. /* Dispatch a queued descriptor to the controller (caller holds lock) */
  624. static void edma_execute(struct edma_chan *echan)
  625. {
  626. struct edma_cc *ecc = echan->ecc;
  627. struct virt_dma_desc *vdesc;
  628. struct edma_desc *edesc;
  629. struct device *dev = echan->vchan.chan.device->dev;
  630. int i, j, left, nslots;
  631. if (!echan->edesc) {
  632. /* Setup is needed for the first transfer */
  633. vdesc = vchan_next_desc(&echan->vchan);
  634. if (!vdesc)
  635. return;
  636. list_del(&vdesc->node);
  637. echan->edesc = to_edma_desc(&vdesc->tx);
  638. }
  639. edesc = echan->edesc;
  640. /* Find out how many left */
  641. left = edesc->pset_nr - edesc->processed;
  642. nslots = min(MAX_NR_SG, left);
  643. edesc->sg_len = 0;
  644. /* Write descriptor PaRAM set(s) */
  645. for (i = 0; i < nslots; i++) {
  646. j = i + edesc->processed;
  647. edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
  648. edesc->sg_len += edesc->pset[j].len;
  649. dev_vdbg(dev,
  650. "\n pset[%d]:\n"
  651. " chnum\t%d\n"
  652. " slot\t%d\n"
  653. " opt\t%08x\n"
  654. " src\t%08x\n"
  655. " dst\t%08x\n"
  656. " abcnt\t%08x\n"
  657. " ccnt\t%08x\n"
  658. " bidx\t%08x\n"
  659. " cidx\t%08x\n"
  660. " lkrld\t%08x\n",
  661. j, echan->ch_num, echan->slot[i],
  662. edesc->pset[j].param.opt,
  663. edesc->pset[j].param.src,
  664. edesc->pset[j].param.dst,
  665. edesc->pset[j].param.a_b_cnt,
  666. edesc->pset[j].param.ccnt,
  667. edesc->pset[j].param.src_dst_bidx,
  668. edesc->pset[j].param.src_dst_cidx,
  669. edesc->pset[j].param.link_bcntrld);
  670. /* Link to the previous slot if not the last set */
  671. if (i != (nslots - 1))
  672. edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
  673. }
  674. edesc->processed += nslots;
  675. /*
676. * If this is the last set in a series of SG-list transactions,
677. * set up a link to the dummy slot; this results in all future
678. * events being absorbed, which is OK because we're done
  679. */
  680. if (edesc->processed == edesc->pset_nr) {
  681. if (edesc->cyclic)
  682. edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
  683. else
  684. edma_link(ecc, echan->slot[nslots - 1],
  685. echan->ecc->dummy_slot);
  686. }
  687. if (echan->missed) {
  688. /*
  689. * This happens due to setup times between intermediate
  690. * transfers in long SG lists which have to be broken up into
  691. * transfers of MAX_NR_SG
  692. */
  693. dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
  694. edma_clean_channel(echan);
  695. edma_stop(echan);
  696. edma_start(echan);
  697. edma_trigger_channel(echan);
  698. echan->missed = 0;
  699. } else if (edesc->processed <= MAX_NR_SG) {
  700. dev_dbg(dev, "first transfer starting on channel %d\n",
  701. echan->ch_num);
  702. edma_start(echan);
  703. } else {
  704. dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
  705. echan->ch_num, edesc->processed);
  706. edma_resume(echan);
  707. }
  708. }
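/*
 * Batching illustration for edma_execute() (numbers are only an example):
 * a 45-element SG list is issued as batches of 20, 20 and 5 PaRAM sets.
 * The first batch calls edma_start(), later batches resume the paused
 * channel, and only the final batch links to the dummy slot.
 */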
  709. static int edma_terminate_all(struct dma_chan *chan)
  710. {
  711. struct edma_chan *echan = to_edma_chan(chan);
  712. unsigned long flags;
  713. LIST_HEAD(head);
  714. spin_lock_irqsave(&echan->vchan.lock, flags);
  715. /*
  716. * Stop DMA activity: we assume the callback will not be called
  717. * after edma_dma() returns (even if it does, it will see
  718. * echan->edesc is NULL and exit.)
  719. */
  720. if (echan->edesc) {
  721. edma_stop(echan);
  722. /* Move the cyclic channel back to default queue */
  723. if (!echan->tc && echan->edesc->cyclic)
  724. edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
  725. vchan_terminate_vdesc(&echan->edesc->vdesc);
  726. echan->edesc = NULL;
  727. }
  728. vchan_get_all_descriptors(&echan->vchan, &head);
  729. spin_unlock_irqrestore(&echan->vchan.lock, flags);
  730. vchan_dma_desc_free_list(&echan->vchan, &head);
  731. return 0;
  732. }
  733. static void edma_synchronize(struct dma_chan *chan)
  734. {
  735. struct edma_chan *echan = to_edma_chan(chan);
  736. vchan_synchronize(&echan->vchan);
  737. }
  738. static int edma_slave_config(struct dma_chan *chan,
  739. struct dma_slave_config *cfg)
  740. {
  741. struct edma_chan *echan = to_edma_chan(chan);
  742. if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
  743. cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
  744. return -EINVAL;
  745. if (cfg->src_maxburst > chan->device->max_burst ||
  746. cfg->dst_maxburst > chan->device->max_burst)
  747. return -EINVAL;
  748. memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
  749. return 0;
  750. }
  751. static int edma_dma_pause(struct dma_chan *chan)
  752. {
  753. struct edma_chan *echan = to_edma_chan(chan);
  754. if (!echan->edesc)
  755. return -EINVAL;
  756. edma_pause(echan);
  757. return 0;
  758. }
  759. static int edma_dma_resume(struct dma_chan *chan)
  760. {
  761. struct edma_chan *echan = to_edma_chan(chan);
  762. edma_resume(echan);
  763. return 0;
  764. }
  765. /*
  766. * A PaRAM set configuration abstraction used by other modes
767. * @chan: Channel whose PaRAM set we're configuring
  768. * @pset: PaRAM set to initialize and setup.
  769. * @src_addr: Source address of the DMA
  770. * @dst_addr: Destination address of the DMA
  771. * @burst: In units of dev_width, how much to send
772. * @dev_width: Width of a single device access, in bytes
  773. * @dma_length: Total length of the DMA transfer
  774. * @direction: Direction of the transfer
  775. */
  776. static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
  777. dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
  778. unsigned int acnt, unsigned int dma_length,
  779. enum dma_transfer_direction direction)
  780. {
  781. struct edma_chan *echan = to_edma_chan(chan);
  782. struct device *dev = chan->device->dev;
  783. struct edmacc_param *param = &epset->param;
  784. int bcnt, ccnt, cidx;
  785. int src_bidx, dst_bidx, src_cidx, dst_cidx;
  786. int absync;
  787. /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
  788. if (!burst)
  789. burst = 1;
  790. /*
  791. * If the maxburst is equal to the fifo width, use
  792. * A-synced transfers. This allows for large contiguous
  793. * buffer transfers using only one PaRAM set.
  794. */
  795. if (burst == 1) {
  796. /*
  797. * For the A-sync case, bcnt and ccnt are the remainder
  798. * and quotient respectively of the division of:
  799. * (dma_length / acnt) by (SZ_64K -1). This is so
800. * that in case bcnt overflows, we have ccnt to use.
  801. * Note: In A-sync transfer only, bcntrld is used, but it
  802. * only applies for sg_dma_len(sg) >= SZ_64K.
803. * In this case, the approach adopted is: bcnt for the
804. * first frame will be the remainder below. Then for
  805. * every successive frame, bcnt will be SZ_64K-1. This
806. * is assured as bcntrld = 0xffff at the end of this function.
  807. */
  808. absync = false;
  809. ccnt = dma_length / acnt / (SZ_64K - 1);
  810. bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
  811. /*
  812. * If bcnt is non-zero, we have a remainder and hence an
  813. * extra frame to transfer, so increment ccnt.
  814. */
  815. if (bcnt)
  816. ccnt++;
  817. else
  818. bcnt = SZ_64K - 1;
  819. cidx = acnt;
  820. } else {
  821. /*
  822. * If maxburst is greater than the fifo address_width,
  823. * use AB-synced transfers where A count is the fifo
  824. * address_width and B count is the maxburst. In this
  825. * case, we are limited to transfers of C count frames
  826. * of (address_width * maxburst) where C count is limited
  827. * to SZ_64K-1. This places an upper bound on the length
  828. * of an SG segment that can be handled.
  829. */
  830. absync = true;
  831. bcnt = burst;
  832. ccnt = dma_length / (acnt * bcnt);
  833. if (ccnt > (SZ_64K - 1)) {
  834. dev_err(dev, "Exceeded max SG segment size\n");
  835. return -EINVAL;
  836. }
  837. cidx = acnt * bcnt;
  838. }
  839. epset->len = dma_length;
  840. if (direction == DMA_MEM_TO_DEV) {
  841. src_bidx = acnt;
  842. src_cidx = cidx;
  843. dst_bidx = 0;
  844. dst_cidx = 0;
  845. epset->addr = src_addr;
  846. } else if (direction == DMA_DEV_TO_MEM) {
  847. src_bidx = 0;
  848. src_cidx = 0;
  849. dst_bidx = acnt;
  850. dst_cidx = cidx;
  851. epset->addr = dst_addr;
  852. } else if (direction == DMA_MEM_TO_MEM) {
  853. src_bidx = acnt;
  854. src_cidx = cidx;
  855. dst_bidx = acnt;
  856. dst_cidx = cidx;
  857. epset->addr = src_addr;
  858. } else {
  859. dev_err(dev, "%s: direction not implemented yet\n", __func__);
  860. return -EINVAL;
  861. }
  862. param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
  863. /* Configure A or AB synchronized transfers */
  864. if (absync)
  865. param->opt |= SYNCDIM;
  866. param->src = src_addr;
  867. param->dst = dst_addr;
  868. param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
  869. param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
  870. param->a_b_cnt = bcnt << 16 | acnt;
  871. param->ccnt = ccnt;
  872. /*
873. * The only time (bcntrld) auto-reload is required is the
874. * A-sync case, and in that case only a reload value of
875. * SZ_64K-1 is ever needed. 'link' is initially set to NULL
  876. * and then later will be populated by edma_execute.
  877. */
  878. param->link_bcntrld = 0xffffffff;
  879. return absync;
  880. }
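/*
 * Worked examples for the A/AB-sync math above (illustrative numbers):
 * - A-sync: dma_length = 96000, acnt = 4, burst = 1 -> 24000 elements;
 *   ccnt = 24000 / 65535 = 0, the remainder 24000 is non-zero so ccnt
 *   becomes 1, bcnt = 24000, cidx = 4.
 * - AB-sync: acnt = 4, burst = 8 -> bcnt = 8, ccnt = 96000 / 32 = 3000,
 *   cidx = 32.
 */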
  881. static struct dma_async_tx_descriptor *edma_prep_slave_sg(
  882. struct dma_chan *chan, struct scatterlist *sgl,
  883. unsigned int sg_len, enum dma_transfer_direction direction,
  884. unsigned long tx_flags, void *context)
  885. {
  886. struct edma_chan *echan = to_edma_chan(chan);
  887. struct device *dev = chan->device->dev;
  888. struct edma_desc *edesc;
  889. dma_addr_t src_addr = 0, dst_addr = 0;
  890. enum dma_slave_buswidth dev_width;
  891. u32 burst;
  892. struct scatterlist *sg;
  893. int i, nslots, ret;
  894. if (unlikely(!echan || !sgl || !sg_len))
  895. return NULL;
  896. if (direction == DMA_DEV_TO_MEM) {
  897. src_addr = echan->cfg.src_addr;
  898. dev_width = echan->cfg.src_addr_width;
  899. burst = echan->cfg.src_maxburst;
  900. } else if (direction == DMA_MEM_TO_DEV) {
  901. dst_addr = echan->cfg.dst_addr;
  902. dev_width = echan->cfg.dst_addr_width;
  903. burst = echan->cfg.dst_maxburst;
  904. } else {
  905. dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
  906. return NULL;
  907. }
  908. if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
  909. dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
  910. return NULL;
  911. }
  912. edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
  913. if (!edesc)
  914. return NULL;
  915. edesc->pset_nr = sg_len;
  916. edesc->residue = 0;
  917. edesc->direction = direction;
  918. edesc->echan = echan;
  919. /* Allocate a PaRAM slot, if needed */
  920. nslots = min_t(unsigned, MAX_NR_SG, sg_len);
  921. for (i = 0; i < nslots; i++) {
  922. if (echan->slot[i] < 0) {
  923. echan->slot[i] =
  924. edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
  925. if (echan->slot[i] < 0) {
  926. kfree(edesc);
  927. dev_err(dev, "%s: Failed to allocate slot\n",
  928. __func__);
  929. return NULL;
  930. }
  931. }
  932. }
  933. /* Configure PaRAM sets for each SG */
  934. for_each_sg(sgl, sg, sg_len, i) {
  935. /* Get address for each SG */
  936. if (direction == DMA_DEV_TO_MEM)
  937. dst_addr = sg_dma_address(sg);
  938. else
  939. src_addr = sg_dma_address(sg);
  940. ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
  941. dst_addr, burst, dev_width,
  942. sg_dma_len(sg), direction);
  943. if (ret < 0) {
  944. kfree(edesc);
  945. return NULL;
  946. }
  947. edesc->absync = ret;
  948. edesc->residue += sg_dma_len(sg);
  949. if (i == sg_len - 1)
  950. /* Enable completion interrupt */
  951. edesc->pset[i].param.opt |= TCINTEN;
  952. else if (!((i+1) % MAX_NR_SG))
  953. /*
  954. * Enable early completion interrupt for the
955. * intermediate set. In this case the driver will be
  956. * notified when the paRAM set is submitted to TC. This
  957. * will allow more time to set up the next set of slots.
  958. */
  959. edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
  960. }
  961. edesc->residue_stat = edesc->residue;
  962. return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
  963. }
  964. static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
  965. struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  966. size_t len, unsigned long tx_flags)
  967. {
  968. int ret, nslots;
  969. struct edma_desc *edesc;
  970. struct device *dev = chan->device->dev;
  971. struct edma_chan *echan = to_edma_chan(chan);
  972. unsigned int width, pset_len, array_size;
  973. if (unlikely(!echan || !len))
  974. return NULL;
  975. /* Align the array size (acnt block) with the transfer properties */
  976. switch (__ffs((src | dest | len))) {
  977. case 0:
  978. array_size = SZ_32K - 1;
  979. break;
  980. case 1:
  981. array_size = SZ_32K - 2;
  982. break;
  983. default:
  984. array_size = SZ_32K - 4;
  985. break;
  986. }
  987. if (len < SZ_64K) {
  988. /*
  989. * Transfer size less than 64K can be handled with one paRAM
  990. * slot and with one burst.
  991. * ACNT = length
  992. */
  993. width = len;
  994. pset_len = len;
  995. nslots = 1;
  996. } else {
  997. /*
  998. * Transfer size bigger than 64K will be handled with maximum of
  999. * two paRAM slots.
  1000. * slot1: (full_length / 32767) times 32767 bytes bursts.
  1001. * ACNT = 32767, length1: (full_length / 32767) * 32767
  1002. * slot2: the remaining amount of data after slot1.
  1003. * ACNT = full_length - length1, length2 = ACNT
  1004. *
  1005. * When the full_length is a multiple of 32767 one slot can be
  1006. * used to complete the transfer.
  1007. */
  1008. width = array_size;
  1009. pset_len = rounddown(len, width);
  1010. /* One slot is enough for lengths multiple of (SZ_32K -1) */
  1011. if (unlikely(pset_len == len))
  1012. nslots = 1;
  1013. else
  1014. nslots = 2;
  1015. }
  1016. edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
  1017. if (!edesc)
  1018. return NULL;
  1019. edesc->pset_nr = nslots;
  1020. edesc->residue = edesc->residue_stat = len;
  1021. edesc->direction = DMA_MEM_TO_MEM;
  1022. edesc->echan = echan;
  1023. ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
  1024. width, pset_len, DMA_MEM_TO_MEM);
  1025. if (ret < 0) {
  1026. kfree(edesc);
  1027. return NULL;
  1028. }
  1029. edesc->absync = ret;
  1030. edesc->pset[0].param.opt |= ITCCHEN;
  1031. if (nslots == 1) {
  1032. /* Enable transfer complete interrupt if requested */
  1033. if (tx_flags & DMA_PREP_INTERRUPT)
  1034. edesc->pset[0].param.opt |= TCINTEN;
  1035. } else {
  1036. /* Enable transfer complete chaining for the first slot */
  1037. edesc->pset[0].param.opt |= TCCHEN;
  1038. if (echan->slot[1] < 0) {
  1039. echan->slot[1] = edma_alloc_slot(echan->ecc,
  1040. EDMA_SLOT_ANY);
  1041. if (echan->slot[1] < 0) {
  1042. kfree(edesc);
  1043. dev_err(dev, "%s: Failed to allocate slot\n",
  1044. __func__);
  1045. return NULL;
  1046. }
  1047. }
  1048. dest += pset_len;
  1049. src += pset_len;
  1050. pset_len = width = len % array_size;
  1051. ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
  1052. width, pset_len, DMA_MEM_TO_MEM);
  1053. if (ret < 0) {
  1054. kfree(edesc);
  1055. return NULL;
  1056. }
  1057. edesc->pset[1].param.opt |= ITCCHEN;
  1058. /* Enable transfer complete interrupt if requested */
  1059. if (tx_flags & DMA_PREP_INTERRUPT)
  1060. edesc->pset[1].param.opt |= TCINTEN;
  1061. }
  1062. if (!(tx_flags & DMA_PREP_INTERRUPT))
  1063. edesc->polled = true;
  1064. return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
  1065. }
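/*
 * Splitting example for the memcpy prep above (illustrative): with
 * len = 100000 and src/dest aligned to at least 4 bytes, array_size is
 * 32764, so slot 0 covers rounddown(100000, 32764) = 98292 bytes and
 * slot 1 the remaining 1708 bytes, chained from slot 0 via TCCHEN.
 */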
  1066. static struct dma_async_tx_descriptor *
  1067. edma_prep_dma_interleaved(struct dma_chan *chan,
  1068. struct dma_interleaved_template *xt,
  1069. unsigned long tx_flags)
  1070. {
  1071. struct device *dev = chan->device->dev;
  1072. struct edma_chan *echan = to_edma_chan(chan);
  1073. struct edmacc_param *param;
  1074. struct edma_desc *edesc;
  1075. size_t src_icg, dst_icg;
  1076. int src_bidx, dst_bidx;
  1077. /* Slave mode is not supported */
  1078. if (is_slave_direction(xt->dir))
  1079. return NULL;
  1080. if (xt->frame_size != 1 || xt->numf == 0)
  1081. return NULL;
  1082. if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
  1083. return NULL;
  1084. src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
  1085. if (src_icg) {
  1086. src_bidx = src_icg + xt->sgl[0].size;
  1087. } else if (xt->src_inc) {
  1088. src_bidx = xt->sgl[0].size;
  1089. } else {
  1090. dev_err(dev, "%s: SRC constant addressing is not supported\n",
  1091. __func__);
  1092. return NULL;
  1093. }
  1094. dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
  1095. if (dst_icg) {
  1096. dst_bidx = dst_icg + xt->sgl[0].size;
  1097. } else if (xt->dst_inc) {
  1098. dst_bidx = xt->sgl[0].size;
  1099. } else {
  1100. dev_err(dev, "%s: DST constant addressing is not supported\n",
  1101. __func__);
  1102. return NULL;
  1103. }
  1104. if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
  1105. return NULL;
  1106. edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
  1107. if (!edesc)
  1108. return NULL;
  1109. edesc->direction = DMA_MEM_TO_MEM;
  1110. edesc->echan = echan;
  1111. edesc->pset_nr = 1;
  1112. param = &edesc->pset[0].param;
  1113. param->src = xt->src_start;
  1114. param->dst = xt->dst_start;
  1115. param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
  1116. param->ccnt = 1;
  1117. param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
  1118. param->src_dst_cidx = 0;
  1119. param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
  1120. param->opt |= ITCCHEN;
  1121. /* Enable transfer complete interrupt if requested */
  1122. if (tx_flags & DMA_PREP_INTERRUPT)
  1123. param->opt |= TCINTEN;
  1124. else
  1125. edesc->polled = true;
  1126. return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
  1127. }
  1128. static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
  1129. struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
  1130. size_t period_len, enum dma_transfer_direction direction,
  1131. unsigned long tx_flags)
  1132. {
  1133. struct edma_chan *echan = to_edma_chan(chan);
  1134. struct device *dev = chan->device->dev;
  1135. struct edma_desc *edesc;
  1136. dma_addr_t src_addr, dst_addr;
  1137. enum dma_slave_buswidth dev_width;
  1138. bool use_intermediate = false;
  1139. u32 burst;
  1140. int i, ret, nslots;
  1141. if (unlikely(!echan || !buf_len || !period_len))
  1142. return NULL;
  1143. if (direction == DMA_DEV_TO_MEM) {
  1144. src_addr = echan->cfg.src_addr;
  1145. dst_addr = buf_addr;
  1146. dev_width = echan->cfg.src_addr_width;
  1147. burst = echan->cfg.src_maxburst;
  1148. } else if (direction == DMA_MEM_TO_DEV) {
  1149. src_addr = buf_addr;
  1150. dst_addr = echan->cfg.dst_addr;
  1151. dev_width = echan->cfg.dst_addr_width;
  1152. burst = echan->cfg.dst_maxburst;
  1153. } else {
  1154. dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
  1155. return NULL;
  1156. }
  1157. if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
  1158. dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
  1159. return NULL;
  1160. }
  1161. if (unlikely(buf_len % period_len)) {
1162. dev_err(dev, "Buffer length should be a multiple of the period length\n");
  1163. return NULL;
  1164. }
  1165. nslots = (buf_len / period_len) + 1;
  1166. /*
  1167. * Cyclic DMA users such as audio cannot tolerate delays introduced
  1168. * by cases where the number of periods is more than the maximum
  1169. * number of SGs the EDMA driver can handle at a time. For DMA types
  1170. * such as Slave SGs, such delays are tolerable and synchronized,
  1171. * but the synchronization is difficult to achieve with Cyclic and
  1172. * cannot be guaranteed, so we error out early.
  1173. */
  1174. if (nslots > MAX_NR_SG) {
  1175. /*
  1176. * If the burst and period sizes are the same, we can put
  1177. * the full buffer into a single period and activate
  1178. * intermediate interrupts. This will produce interrupts
  1179. * after each burst, which is also after each desired period.
  1180. */
  1181. if (burst == period_len) {
  1182. period_len = buf_len;
  1183. nslots = 2;
  1184. use_intermediate = true;
  1185. } else {
  1186. return NULL;
  1187. }
  1188. }
  1189. edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
  1190. if (!edesc)
  1191. return NULL;
  1192. edesc->cyclic = 1;
  1193. edesc->pset_nr = nslots;
  1194. edesc->residue = edesc->residue_stat = buf_len;
  1195. edesc->direction = direction;
  1196. edesc->echan = echan;
  1197. dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
  1198. __func__, echan->ch_num, nslots, period_len, buf_len);
  1199. for (i = 0; i < nslots; i++) {
  1200. /* Allocate a PaRAM slot, if needed */
  1201. if (echan->slot[i] < 0) {
  1202. echan->slot[i] =
  1203. edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
  1204. if (echan->slot[i] < 0) {
  1205. kfree(edesc);
  1206. dev_err(dev, "%s: Failed to allocate slot\n",
  1207. __func__);
  1208. return NULL;
  1209. }
  1210. }
  1211. if (i == nslots - 1) {
  1212. memcpy(&edesc->pset[i], &edesc->pset[0],
  1213. sizeof(edesc->pset[0]));
  1214. break;
  1215. }
  1216. ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
  1217. dst_addr, burst, dev_width, period_len,
  1218. direction);
  1219. if (ret < 0) {
  1220. kfree(edesc);
  1221. return NULL;
  1222. }
  1223. if (direction == DMA_DEV_TO_MEM)
  1224. dst_addr += period_len;
  1225. else
  1226. src_addr += period_len;
  1227. dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
  1228. dev_vdbg(dev,
  1229. "\n pset[%d]:\n"
  1230. " chnum\t%d\n"
  1231. " slot\t%d\n"
  1232. " opt\t%08x\n"
  1233. " src\t%08x\n"
  1234. " dst\t%08x\n"
  1235. " abcnt\t%08x\n"
  1236. " ccnt\t%08x\n"
  1237. " bidx\t%08x\n"
  1238. " cidx\t%08x\n"
  1239. " lkrld\t%08x\n",
  1240. i, echan->ch_num, echan->slot[i],
  1241. edesc->pset[i].param.opt,
  1242. edesc->pset[i].param.src,
  1243. edesc->pset[i].param.dst,
  1244. edesc->pset[i].param.a_b_cnt,
  1245. edesc->pset[i].param.ccnt,
  1246. edesc->pset[i].param.src_dst_bidx,
  1247. edesc->pset[i].param.src_dst_cidx,
  1248. edesc->pset[i].param.link_bcntrld);
  1249. edesc->absync = ret;
  1250. /*
  1251. * Enable period interrupt only if it is requested
  1252. */
  1253. if (tx_flags & DMA_PREP_INTERRUPT) {
  1254. edesc->pset[i].param.opt |= TCINTEN;
  1255. /* Also enable intermediate interrupts if necessary */
  1256. if (use_intermediate)
  1257. edesc->pset[i].param.opt |= ITCINTEN;
  1258. }
  1259. }
  1260. /* Place the cyclic channel to highest priority queue */
  1261. if (!echan->tc)
  1262. edma_assign_channel_eventq(echan, EVENTQ_0);
  1263. return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
  1264. }
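/*
 * Ring layout example for the cyclic prep above (illustrative): with
 * buf_len = 48000 and period_len = 12000, nslots = 5; slots 0-3 cover one
 * period each and slot 4 is a copy of slot 0. edma_execute() then links
 * slot 4 back to slot 1, closing the loop.
 */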
static void edma_completion_handler(struct edma_chan *echan)
{
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;

	spin_lock(&echan->vchan.lock);
	edesc = echan->edesc;
	if (edesc) {
		if (edesc->cyclic) {
			vchan_cyclic_callback(&edesc->vdesc);
			spin_unlock(&echan->vchan.lock);
			return;
		} else if (edesc->processed == edesc->pset_nr) {
			edesc->residue = 0;
			edma_stop(echan);
			vchan_cookie_complete(&edesc->vdesc);
			echan->edesc = NULL;

			dev_dbg(dev, "Transfer completed on channel %d\n",
				echan->ch_num);
		} else {
			dev_dbg(dev, "Sub transfer completed on channel %d\n",
				echan->ch_num);

			edma_pause(echan);

			/* Update statistics for tx_status */
			edesc->residue -= edesc->sg_len;
			edesc->residue_stat = edesc->residue;
			edesc->processed_stat = edesc->processed;
		}
		edma_execute(echan);
	}

	spin_unlock(&echan->vchan.lock);
}

/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
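
/*
 * Handle a missed event on a channel: if the current slot is a null/dummy
 * set, only record the miss and let the next issue re-trigger the transfer;
 * otherwise clean up the channel and re-trigger it immediately.
 */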
static void edma_error_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;
	int err;

	if (!echan->edesc)
		return;

	spin_lock(&echan->vchan.lock);

	err = edma_read_slot(ecc, echan->slot[0], &p);

	/*
	 * Issue later based on the missed flag, which is sure to happen
	 * because either:
	 * (1) we finished transmitting an intermediate slot and
	 *     edma_execute is coming up, or
	 * (2) we finished the current transfer and issuing will
	 *     call edma_execute.
	 *
	 * Important note: issuing can be dangerous here and
	 * lead to some nasty recursion when we are in a NULL
	 * slot. So we avoid doing so and set the missed flag.
	 */
	if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		echan->missed = 1;
	} else {
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
	}

	spin_unlock(&echan->vchan.lock);
}
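
/* True if any of the error status registers report a pending error event */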
static inline bool edma_error_pending(struct edma_cc *ecc)
{
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
		return true;

	return false;
}

/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i, j;
	int ctlr;
	unsigned int cnt = 0;
	unsigned int val;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc)) {
		/*
		 * The registers indicate no pending error event but the irq
		 * handler has been called.
		 * Ask eDMA to re-evaluate the error registers.
		 */
		dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
			__func__);
		edma_write(ecc, EDMA_EEVAL, 1);
		return IRQ_NONE;
	}

	while (1) {
		/* Event missed register(s) */
		for (j = 0; j < 2; j++) {
			unsigned long emr;

			val = edma_read_array(ecc, EDMA_EMR, j);
			if (!val)
				continue;

			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
			emr = val;
			for_each_set_bit(i, &emr, 32) {
				int k = (j << 5) + i;

				/* Clear the corresponding EMR bits */
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
			}
		}

		val = edma_read(ecc, EDMA_QEMR);
		if (val) {
			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_QEMCR, val);
			edma_shadow0_write(ecc, SH_QSECR, val);
		}

		val = edma_read(ecc, EDMA_CCERR);
		if (val) {
			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_CCERRCLR, val);
		}

		if (!edma_error_pending(ecc))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = ecc->dev;
	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
	int ret;

	if (echan->tc) {
		eventq_no = echan->tc->id;
	} else if (ecc->tc_list) {
		/* memcpy channel */
		echan->tc = &ecc->tc_list[ecc->info->default_queue];
		eventq_no = echan->tc->id;
	}

	ret = edma_alloc_channel(echan, eventq_no);
	if (ret)
		return ret;

	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = echan->slot[0];
		goto err_slot;
	}

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan, echan->slot[0]);
	echan->alloced = true;

	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
		echan->hw_triggered ? "HW" : "SW");

	return 0;

err_slot:
	edma_free_channel(echan);
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = echan->ecc->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan);
		echan->alloced = false;
	}

	echan->tc = NULL;
	echan->hw_triggered = false;

	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

/*
 * This limit exists to avoid a possible infinite loop when waiting for proof
 * that a particular transfer is completed. This limit can be hit if there
 * are large bursts to/from slow devices or the CPU is never able to catch
 * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
 * RX-FIFO, as many as 55 loops have been seen.
 */
#define EDMA_MAX_TR_WAIT_LOOPS 1000
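
/*
 * Return the number of bytes left for the given descriptor, based on the
 * src/dst position read back from the channel's active PaRAM slot.
 */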
static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
	struct edma_chan *echan = edesc->echan;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos, pos_old;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);
	int event_reg;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(echan->ecc, echan->slot[0], dst);

	/*
	 * "pos" may represent a transfer request that is still being
	 * processed by the EDMACC or EDMATC. We busy-wait, while an event
	 * is pending for the channel, until either:
	 *   1. the position has been updated, or
	 *   2. we hit the loop limit.
	 */
	if (is_slave_direction(edesc->direction))
		event_reg = SH_ER;
	else
		event_reg = SH_ESR;

	pos_old = pos;
	while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
		pos = edma_get_position(echan->ecc, echan->slot[0], dst);
		if (pos != pos_old)
			break;

		if (!--loop_count) {
			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
				"%s: timeout waiting for PaRAM update\n",
				__func__);
			break;
		}

		cpu_relax();
	}

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * If the position is 0, then EDMA loaded the closing dummy slot, the
	 * transfer is completed
	 */
	if (!pos)
		return 0;

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct dma_tx_state txstate_tmp;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE)
		return ret;

	/* Provide a dummy dma_tx_state for completion checking */
	if (!txstate)
		txstate = &txstate_tmp;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		txstate->residue = edma_residue(echan->edesc);
	} else {
		struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
							      cookie);

		if (vdesc)
			txstate->residue = to_edma_desc(&vdesc->tx)->residue;
		else
			txstate->residue = 0;
	}

	/*
	 * Mark the cookie completed if the residue is 0 for non-cyclic
	 * transfers
	 */
	if (ret != DMA_COMPLETE && !txstate->residue &&
	    echan->edesc && echan->edesc->polled &&
	    echan->edesc->vdesc.tx.cookie == cookie) {
		edma_stop(echan);
		vchan_cookie_complete(&echan->edesc->vdesc);
		echan->edesc = NULL;
		edma_execute(echan);
		ret = DMA_COMPLETE;
	}

	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
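
/* True if ch_num is listed in the -1 terminated memcpy_channels array */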
static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
	if (!memcpy_channels)
		return false;
	while (*memcpy_channels != -1) {
		if (*memcpy_channels == ch_num)
			return true;
		memcpy_channels++;
	}
	return false;
}

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
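
/* Set up the slave (and optional memcpy) dma_device and the virtual channels */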
static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
	struct dma_device *s_ddev = &ecc->dma_slave;
	struct dma_device *m_ddev = NULL;
	s32 *memcpy_channels = ecc->info->memcpy_channels;
	int i, j;

	dma_cap_zero(s_ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
	if (ecc->legacy_mode && !memcpy_channels) {
		dev_warn(ecc->dev,
			 "Legacy memcpy is enabled, things might not work\n");

		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
		dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
	}

	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
	s_ddev->device_free_chan_resources = edma_free_chan_resources;
	s_ddev->device_issue_pending = edma_issue_pending;
	s_ddev->device_tx_status = edma_tx_status;
	s_ddev->device_config = edma_slave_config;
	s_ddev->device_pause = edma_dma_pause;
	s_ddev->device_resume = edma_dma_resume;
	s_ddev->device_terminate_all = edma_terminate_all;
	s_ddev->device_synchronize = edma_synchronize;

	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */

	s_ddev->dev = ecc->dev;
	INIT_LIST_HEAD(&s_ddev->channels);

	if (memcpy_channels) {
		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
		if (!m_ddev) {
			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
			memcpy_channels = NULL;
			goto ch_setup;
		}

		ecc->dma_memcpy = m_ddev;
		dma_cap_zero(m_ddev->cap_mask);
		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);

		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
		m_ddev->device_free_chan_resources = edma_free_chan_resources;
		m_ddev->device_issue_pending = edma_issue_pending;
		m_ddev->device_tx_status = edma_tx_status;
		m_ddev->device_config = edma_slave_config;
		m_ddev->device_pause = edma_dma_pause;
		m_ddev->device_resume = edma_dma_resume;
		m_ddev->device_terminate_all = edma_terminate_all;
		m_ddev->device_synchronize = edma_synchronize;

		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

		m_ddev->dev = ecc->dev;
		INIT_LIST_HEAD(&m_ddev->channels);
	} else if (!ecc->legacy_mode) {
		dev_info(ecc->dev, "memcpy is disabled\n");
	}

ch_setup:
	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &ecc->slave_chans[i];

		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
			vchan_init(&echan->vchan, m_ddev);
		else
			vchan_init(&echan->vchan, s_ddev);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
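
/*
 * Decode the CC hardware configuration (CCCFG) and, if the platform data
 * does not provide one, build a default TC/queue priority mapping.
 */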
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_QDMACH(cccfg);
	ecc->num_qchannels = value * 2;

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");

	/* Nothing needs to be done if the queue priority mapping is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
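/*
 * Route the events listed in the "ti,edma-xbar-event-map" property through
 * the crossbar to eDMA channels and record the mapping in the platform data.
 */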
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;

	return 0;
}
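
/* Build the edma_soc_info structure from the device tree properties */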
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	struct edma_soc_info *info;
	struct property *prop;
	int sz, ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	if (legacy_mode) {
		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
					&sz);
		if (prop) {
			ret = edma_xbar_event_map(dev, info, sz);
			if (ret)
				return ERR_PTR(ret);
		}
		return info;
	}

	/* Get the list of channels allocated to be used for memcpy */
	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
	if (prop) {
		const char pname[] = "ti,edma-memcpy-channels";
		size_t nelm = sz / sizeof(s32);
		s32 *memcpy_ch;

		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
					 GFP_KERNEL);
		if (!memcpy_ch)
			return ERR_PTR(-ENOMEM);

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)memcpy_ch, nelm);
		if (ret)
			return ERR_PTR(ret);

		memcpy_ch[nelm] = -1;
		info->memcpy_channels = memcpy_ch;
	}

	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
				&sz);
	if (prop) {
		const char pname[] = "ti,edma-reserved-slot-ranges";
		u32 (*tmp)[2];
		s16 (*rsv_slots)[2];
		size_t nelm = sz / sizeof(*tmp);
		struct edma_rsv_info *rsv_info;
		int i;

		if (!nelm)
			return info;

		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return ERR_PTR(-ENOMEM);

		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
		if (!rsv_info) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
					 GFP_KERNEL);
		if (!rsv_slots) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)tmp, nelm * 2);
		if (ret) {
			kfree(tmp);
			return ERR_PTR(ret);
		}

		for (i = 0; i < nelm; i++) {
			rsv_slots[i][0] = tmp[i][0];
			rsv_slots[i][1] = tmp[i][1];
		}
		rsv_slots[nelm][0] = -1;
		rsv_slots[nelm][1] = -1;

		info->rsv = rsv_info;
		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;

		kfree(tmp);
	}

	return info;
}
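
/*
 * Translate a DT dma-spec (channel number and, in TPCC mode, an optional TC
 * index) into a dma_chan.
 */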
static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct edma_cc *ecc = ofdma->of_dma_data;
	struct dma_chan *chan = NULL;
	struct edma_chan *echan;
	int i;

	if (!ecc || dma_spec->args_count < 1)
		return NULL;

	for (i = 0; i < ecc->num_channels; i++) {
		echan = &ecc->slave_chans[i];
		if (echan->ch_num == dma_spec->args[0]) {
			chan = &echan->vchan.chan;
			break;
		}
	}

	if (!chan)
		return NULL;

	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
		goto out;

	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
	    dma_spec->args[1] < echan->ecc->num_tc) {
		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
		goto out;
	}

	return NULL;
out:
	/* The channel is going to be used as HW synchronized */
	echan->hw_triggered = true;
	return dma_get_slave_channel(chan);
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	return ERR_PTR(-EINVAL);
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	return NULL;
}
#endif

static bool edma_filter_fn(struct dma_chan *chan, void *param);
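
/*
 * Probe: map the CC registers, read the hardware configuration, request the
 * completion and error interrupts, then register the DMA device(s).
 */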
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	s8 (*queue_priority_mapping)[2];
	const s16 (*reserved)[2];
	int i, irq;
	char *irq_name;
	struct resource *mem;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc;
	bool legacy_mode = true;
	int ret;

	if (node) {
		const struct of_device_id *match;

		match = of_match_node(edma_of_ids, node);
		if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
			legacy_mode = false;

		info = edma_setup_info_from_dt(dev, legacy_mode);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	ecc->dev = dev;
	ecc->id = pdev->id;
	ecc->legacy_mode = legacy_mode;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		pm_runtime_disable(dev);
		return ret;
	}

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		goto err_disable_pm;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);

	ecc->channels_mask = devm_kcalloc(dev,
					  BITS_TO_LONGS(ecc->num_channels),
					  sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
		ret = -ENOMEM;
		goto err_disable_pm;
	}

	/* Mark all channels available initially */
	bitmap_fill(ecc->channels_mask, ecc->num_channels);

	ecc->default_queue = info->default_queue;

	if (info->rsv) {
		/* Set the reserved slots in inuse list */
		reserved = info->rsv->rsv_slots;
		if (reserved) {
			for (i = 0; reserved[i][0] != -1; i++)
				bitmap_set(ecc->slot_inuse, reserved[i][0],
					   reserved[i][1]);
		}

		/* Clear channels not usable for Linux */
		reserved = info->rsv->rsv_chans;
		if (reserved) {
			for (i = 0; reserved[i][0] != -1; i++)
				bitmap_clear(ecc->channels_mask, reserved[i][0],
					     reserved[i][1]);
		}
	}

	for (i = 0; i < ecc->num_slots; i++) {
		/* Reset only unused - not reserved - paRAM slots */
		if (!test_bit(i, ecc->slot_inuse))
			edma_write_slot(ecc, i, &dummy_paramset);
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq > 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			goto err_disable_pm;
		}
		ecc->ccint = irq;
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq > 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			goto err_disable_pm;
		}
		ecc->ccerrint = irq;
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		ret = ecc->dummy_slot;
		goto err_disable_pm;
	}

	queue_priority_mapping = info->queue_priority_mapping;

	if (!ecc->legacy_mode) {
		int lowest_priority = 0;
		unsigned int array_max;
		struct of_phandle_args tc_args;

		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
					    sizeof(*ecc->tc_list), GFP_KERNEL);
		if (!ecc->tc_list) {
			ret = -ENOMEM;
			goto err_reg1;
		}

		for (i = 0;; i++) {
			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
							       1, i, &tc_args);
			if (ret || i == ecc->num_tc)
				break;

			ecc->tc_list[i].node = tc_args.np;
			ecc->tc_list[i].id = i;
			queue_priority_mapping[i][1] = tc_args.args[0];
			if (queue_priority_mapping[i][1] > lowest_priority) {
				lowest_priority = queue_priority_mapping[i][1];
				info->default_queue = i;
			}
		}

		/* See if we have optional dma-channel-mask array */
		array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
		ret = of_property_read_variable_u32_array(node,
						"dma-channel-mask",
						(u32 *)ecc->channels_mask,
						1, array_max);
		if (ret > 0 && ret != array_max)
			dev_warn(dev, "dma-channel-mask is not complete.\n");
		else if (ret == -EOVERFLOW || ret == -ENODATA)
			dev_warn(dev,
				 "dma-channel-mask is out of range or empty\n");
	}

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
	edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
	edma_write_array(ecc, EDMA_QRAE, 0, 0x0);

	ecc->info = info;

	/* Init the dma device and channels */
	edma_dma_init(ecc, legacy_mode);

	for (i = 0; i < ecc->num_channels; i++) {
		/* Do not touch reserved channels */
		if (!test_bit(i, ecc->channels_mask))
			continue;

		/* Assign all channels to the default queue */
		edma_assign_channel_eventq(&ecc->slave_chans[i],
					   info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
	}

	ecc->dma_slave.filter.map = info->slave_map;
	ecc->dma_slave.filter.mapcnt = info->slavecnt;
	ecc->dma_slave.filter.fn = edma_filter_fn;

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret) {
		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
		goto err_reg1;
	}

	if (ecc->dma_memcpy) {
		ret = dma_async_device_register(ecc->dma_memcpy);
		if (ret) {
			dev_err(dev, "memcpy ddev registration failed (%d)\n",
				ret);
			dma_async_device_unregister(&ecc->dma_slave);
			goto err_reg1;
		}
	}

	if (node)
		of_dma_controller_register(node, of_edma_xlate, ecc);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
err_disable_pm:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
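
/* Remove the channels from the dma_device list and stop their tasklets */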
static void edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct edma_chan *echan, *_echan;

	list_for_each_entry_safe(echan, _echan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&echan->vchan.chan.device_node);
		tasklet_kill(&echan->vchan.task);
	}
}
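
/* Undo edma_probe(): free the interrupts, channels, slots and runtime PM */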
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	devm_free_irq(dev, ecc->ccint, ecc);
	devm_free_irq(dev, ecc->ccerrint, ecc);

	edma_cleanup_vchan(&ecc->dma_slave);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	if (ecc->dma_memcpy)
		dma_async_device_unregister(ecc->dma_memcpy);
	edma_free_slot(ecc, ecc->dummy_slot);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
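/* Disable the completion interrupts of all allocated channels before sleep */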
static int edma_pm_suspend(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced)
			edma_setup_interrupt(&echan[i], false);
	}

	return 0;
}
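
/*
 * Restore the context lost during sleep: dummy slot contents, queue
 * priorities and, for allocated channels, shadow region access, interrupts
 * and the channel -> slot mapping.
 */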
static int edma_pm_resume(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;
	s8 (*queue_priority_mapping)[2];

	/* Re-initialize the dummy slot to the dummy PaRAM set */
	edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			/* ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0,
				       EDMA_REG_ARRAY_INDEX(i),
				       EDMA_CHANNEL_BIT(i));

			edma_setup_interrupt(&echan[i], true);

			/* Set up channel -> slot mapping for the entry slot */
			edma_set_chmap(&echan[i], echan[i].slot[0]);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};
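
/* The TPTC instances only need runtime PM to be enabled and kept active */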
static int edma_tptc_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	return pm_runtime_get_sync(&pdev->dev);
}

static struct platform_driver edma_tptc_driver = {
	.probe		= edma_tptc_probe,
	.driver = {
		.name	= "edma3-tptc",
		.of_match_table = edma_tptc_of_ids,
	},
};
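
/*
 * Filter function: match the channel whose eDMA channel number equals *param
 * and mark it as HW triggered.
 */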
static bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	bool match = false;

	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;

		if (ch_req == echan->ch_num) {
			/* The channel is going to be used as HW synchronized */
			echan->hw_triggered = true;
			match = true;
		}
	}
	return match;
}

static int edma_init(void)
{
	int ret;

	ret = platform_driver_register(&edma_tptc_driver);
	if (ret)
		return ret;

	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
	platform_driver_unregister(&edma_tptc_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <[email protected]>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");