at_xdmac.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <[email protected]>
 */
#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
#define AT_XDMAC_WRHP(i) (((i) & 0xF) << 4)
#define AT_XDMAC_WRMP(i) (((i) & 0xF) << 8)
#define AT_XDMAC_WRLP(i) (((i) & 0xF) << 12)
#define AT_XDMAC_RDHP(i) (((i) & 0xF) << 16)
#define AT_XDMAC_RDMP(i) (((i) & 0xF) << 20)
#define AT_XDMAC_RDLP(i) (((i) & 0xF) << 24)
#define AT_XDMAC_RDSG(i) (((i) & 0xF) << 28)
#define AT_XDMAC_GCFG_M2M (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
#define AT_XDMAC_GCFG_P2M (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
			   AT_XDMAC_WRHP(0x5))
#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_PW0(i) (((i) & 0xF) << 0)
#define AT_XDMAC_PW1(i) (((i) & 0xF) << 4)
#define AT_XDMAC_PW2(i) (((i) & 0xF) << 8)
#define AT_XDMAC_PW3(i) (((i) & 0xF) << 12)
#define AT_XDMAC_GWAC_M2M 0
#define AT_XDMAC_GWAC_P2M (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27)
#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
#define AT_XDMAC_CC_DWIDTH_OFFSET 11
#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
#define AT_XDMAC_CC_DWIDTH_WORD 0x2
#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES 5

#define AT_XDMAC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
};

struct at_xdmac_layout {
	/* Global Channel Read Suspend Register */
	u8 grs;
	/* Global Write Suspend Register */
	u8 gws;
	/* Global Channel Read Write Suspend Register */
	u8 grws;
	/* Global Channel Read Write Resume Register */
	u8 grwr;
	/* Global Channel Software Request Register */
	u8 gswr;
	/* Global channel Software Request Status Register */
	u8 gsws;
	/* Global Channel Software Flush Request Register */
	u8 gswf;
	/* Channel reg base */
	u8 chan_cc_reg_base;
	/* Source/Destination Interface must be specified or not */
	bool sdif;
	/* AXI queue priority configuration supported */
	bool axi_config;
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan chan;
	void __iomem *ch_regs;
	u32 mask; /* Channel Mask */
	u32 cfg; /* Channel Configuration Register */
	u8 perid; /* Peripheral ID */
	u8 perif; /* Peripheral Interface */
	u8 memif; /* Memory Interface */
	u32 save_cc;
	u32 save_cim;
	u32 save_cnda;
	u32 save_cndc;
	u32 irq_status;
	unsigned long status;
	struct tasklet_struct tasklet;
	struct dma_slave_config sconfig;
	spinlock_t lock;
	struct list_head xfers_list;
	struct list_head free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device dma;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	u32 save_gim;
	u32 save_gs;
	struct dma_pool *at_xdmac_desc_pool;
	const struct at_xdmac_layout *layout;
	struct at_xdmac_chan chan[];
};

/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	u32 mbr_nda; /* Next Descriptor Member */
	u32 mbr_ubc; /* Microblock Control Member */
	u32 mbr_sa; /* Source Address Member */
	u32 mbr_da; /* Destination Address Member */
	u32 mbr_cfg; /* Configuration Register */
	u32 mbr_bc; /* Block Control Register */
	u32 mbr_ds; /* Data Stride Register */
	u32 mbr_sus; /* Source Microblock Stride Register */
	u32 mbr_dus; /* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
	struct at_xdmac_lld lld;
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor tx_dma_desc;
	struct list_head desc_node;
	/* Following members are only used by the first descriptor */
	bool active_xfer;
	unsigned int xfer_size;
	struct list_head descs_list;
	struct list_head xfer_node;
} __aligned(sizeof(u64));

static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
	.grs = 0x28,
	.gws = 0x2C,
	.grws = 0x30,
	.grwr = 0x34,
	.gswr = 0x38,
	.gsws = 0x3C,
	.gswf = 0x40,
	.chan_cc_reg_base = 0x50,
	.sdif = true,
	.axi_config = false,
};

static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
	.grs = 0x30,
	.gws = 0x38,
	.grws = 0x40,
	.grwr = 0x44,
	.gswr = 0x48,
	.gsws = 0x4C,
	.gswf = 0x50,
	.chan_cc_reg_base = 0x60,
	.sdif = false,
	.axi_config = true,
};

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
}

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
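/*
 * Descriptive note (added): the accessors above use relaxed MMIO helpers, so
 * they give no ordering guarantees by themselves; an explicit wmb() is issued
 * before enabling a channel in at_xdmac_start_xfer(). at_xdmac_read/write
 * touch the global register file, while at_xdmac_chan_read/write are offsets
 * from the per-channel base returned by at_xdmac_chan_reg_base().
 */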
static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
};
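/*
 * Descriptive note (added): the DWIDTH field is a log2 encoding of the
 * transfer width in bytes, e.g. a field value of 0x2 (AT_XDMAC_CC_DWIDTH_WORD)
 * means 1 << 2 = 4 bytes per data beat, which is why callers shift lengths
 * right by the dwidth value to get a microblock length.
 */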
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait that all chans are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}
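/*
 * Descriptive note (added): writing all-ones to GD requests the disable of
 * every channel; the loop then polls GS until no channel reports as enabled,
 * after which all global interrupts are masked through GID.
 */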
/* Call with lock hold. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	u32 reg;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	/* Set transfer as active to not try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
	if (atxdmac->layout->sdif)
		reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing non cyclic transfer we need to use the next
	 * descriptor view 2 since some fields of the configuration register
	 * depend on transfer size and src/dest addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
	else if ((first->lld.mbr_ubc &
		  AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	else
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;

	/*
	 * Even if the register will be updated from the configuration in the
	 * descriptor when using view 2 or higher, the PROT bit won't be set
	 * properly. This bit can be modified only by using the channel
	 * configuration register.
	 */
	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
	/*
	 * Request Overflow Error is only for peripheral synchronized transfers
	 */
	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
		reg |= AT_XDMAC_CIE_ROIE;

	/*
	 * There is no end of list when doing cyclic dma, we need to get
	 * an interrupt after each periods.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc *desc = txd_to_at_desc(tx);
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long irqflags;

	spin_lock_irqsave(&atchan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	spin_unlock_irqrestore(&atchan->lock, irqflags);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);

	return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc *desc;
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	dma_addr_t phys;

	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
	memset(&desc->lld, 0, sizeof(desc->lld));
	INIT_LIST_HEAD(&desc->descs_list);
	desc->direction = DMA_TRANS_NONE;
	desc->xfer_size = 0;
	desc->active_xfer = false;
}

/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		at_xdmac_init_used_desc(desc);
	}

	return desc;
}

static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;

	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac *atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan;
	struct device *dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandler args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		atchan->memif, atchan->perif, atchan->perid);

	return chan;
}

static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
				      enum dma_transfer_direction direction)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	int csize, dwidth;

	if (direction == DMA_DEV_TO_MEM) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_INCREMENTED_AM
			| AT_XDMAC_CC_SAM_FIXED_AM
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_PER2MEM
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		if (atxdmac->layout->sdif)
			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
				       AT_XDMAC_CC_SIF(atchan->perif);

		csize = ffs(atchan->sconfig.src_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid src addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	} else if (direction == DMA_MEM_TO_DEV) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_FIXED_AM
			| AT_XDMAC_CC_SAM_INCREMENTED_AM
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_MEM2PER
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		if (atxdmac->layout->sdif)
			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
				       AT_XDMAC_CC_SIF(atchan->memif);

		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid dst addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	}

	dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

	return 0;
}
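/*
 * Descriptive note (added): both CSIZE and DWIDTH are log2 encodings derived
 * with ffs() - 1 above, e.g. a maxburst of 16 data gives a CSIZE field of 4
 * and a 4-byte address width gives a DWIDTH field of 2.
 */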
/*
 * Only check that maxburst and addr width values are supported by
 * the controller but not that the configuration is good to perform the
 * transfer since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
				     struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	if (at_xdmac_check_slave_config(sconfig)) {
		dev_err(chan2dev(chan), "invalid slave configuration\n");
		return -EINVAL;
	}

	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

	return 0;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	int i;
	unsigned int xfer_size = 0;
	unsigned long irqflags;
	struct dma_async_tx_descriptor *ret = NULL;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		__func__, sg_len,
		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		flags);

	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
	spin_lock_irqsave(&atchan->lock, irqflags);

	if (at_xdmac_compute_chan_conf(chan, direction))
		goto spin_unlock;

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc *desc = NULL;
		u32 len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			goto spin_unlock;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			__func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			goto spin_unlock;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = mem;
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		dwidth = at_xdmac_get_dwidth(atchan->cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? dwidth
			       : AT_XDMAC_CC_DWIDTH_BYTE;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
			| (len >> fixed_dwidth); /* microblock length */
		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
		xfer_size += len;
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;
	first->direction = direction;
	ret = &first->tx_dma_desc;

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return ret;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	unsigned int periods = buf_len / period_len;
	int i;
	unsigned long irqflags;

	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
		__func__, &buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	if (at_xdmac_compute_chan_conf(chan, direction))
		return NULL;

	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc *desc = NULL;

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			spin_unlock_irqrestore(&atchan->lock, irqflags);
			return NULL;
		}
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
			__func__, desc, &desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		desc->lld.mbr_cfg = atchan->cfg;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}
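	/*
	 * Descriptive note (added): close the descriptor ring by chaining the
	 * last period's descriptor back to the first one so the transfer
	 * wraps around indefinitely.
	 */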
	at_xdmac_queue_desc(chan, prev, first);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}

static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
	u32 width;

	/*
	 * Check address alignment to select the greater data width we
	 * can use.
	 *
	 * Some XDMAC implementations don't provide dword transfer, in
	 * this case selecting dword has the same behavior as
	 * selecting word transfers.
	 */
	if (!(addr & 7)) {
		width = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!(addr & 3)) {
		width = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!(addr & 1)) {
		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		width = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	return width;
}
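/*
 * Descriptive note (added): an address ending in 0x8 is 8-byte aligned and
 * selects the double-word width, while one ending in 0x6 only allows
 * half-word accesses. Callers OR source, destination and length together so
 * the most constrained operand decides the width.
 */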
static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
				struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *prev,
				dma_addr_t src, dma_addr_t dst,
				struct dma_interleaved_template *xt,
				struct data_chunk *chunk)
{
	struct at_xdmac_desc *desc;
	u32 dwidth;
	unsigned long flags;
	size_t ublen;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover we don't know the
	 * direction, it involves we can't dynamically set the source and dest
	 * interface so we have to use the same one. Only interface 0 allows EBI
	 * access. Hopefully we can access DDR through both ports (at least on
	 * SAMA5D4x), so we can use the same interface for source and dest,
	 * that solves the fact we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID has to not
	 * match the one of another channel. If not, it could lead to spurious
	 * flag status.
	 * For SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_dbg(chan2dev(chan),
			"%s: chunk too big (%zu, max size %lu)...\n",
			__func__, chunk->size,
			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
		return NULL;
	}

	if (prev)
		dev_dbg(chan2dev(chan),
			"Adding items at the end of desc 0x%p\n", prev);

	if (xt->src_inc) {
		if (xt->src_sgl)
			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
	}

	if (xt->dst_inc) {
		if (xt->dst_sgl)
			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = chunk->size >> dwidth;

	desc->lld.mbr_sa = src;
	desc->lld.mbr_da = dst;
	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
	if (prev)
		at_xdmac_queue_desc(chan, prev, desc);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *prev = NULL, *first = NULL;
	dma_addr_t dst_addr, src_addr;
	size_t src_skip = 0, dst_skip = 0, len = 0;
	struct data_chunk *chunk;
	int i;

	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
		return NULL;

	/*
	 * TODO: Handle the case where we have to repeat a chain of
	 * descriptors...
	 */
	if ((xt->numf > 1) && (xt->frame_size > 1))
		return NULL;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
	dst_addr = xt->dst_start;

	if (xt->numf > 1) {
		first = at_xdmac_interleaved_queue_desc(chan, atchan,
							NULL,
							src_addr, dst_addr,
							xt, xt->sgl);
		if (!first)
			return NULL;

		/* Length of the block is (BLEN+1) microblocks. */
		for (i = 0; i < xt->numf - 1; i++)
			at_xdmac_increment_block_count(chan, first);

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, first, first);
		list_add_tail(&first->desc_node, &first->descs_list);
	} else {
		for (i = 0; i < xt->frame_size; i++) {
			size_t src_icg = 0, dst_icg = 0;
			struct at_xdmac_desc *desc;

			chunk = xt->sgl + i;

			dst_icg = dmaengine_get_dst_icg(xt, chunk);
			src_icg = dmaengine_get_src_icg(xt, chunk);

			src_skip = chunk->size + src_icg;
			dst_skip = chunk->size + dst_icg;

			dev_dbg(chan2dev(chan),
				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
				__func__, chunk->size, src_icg, dst_icg);

			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
							       prev,
							       src_addr, dst_addr,
							       xt, chunk);
			if (!desc) {
				if (first)
					list_splice_tail_init(&first->descs_list,
							      &atchan->free_descs_list);
				return NULL;
			}

			if (!first)
				first = desc;

			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
				__func__, desc, first);
			list_add_tail(&desc->desc_node, &first->descs_list);

			if (xt->src_sgl)
				src_addr += src_skip;

			if (xt->dst_sgl)
				dst_addr += dst_skip;

			len += chunk->size;
			prev = desc;
		}
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	size_t remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t src_addr = src, dst_addr = dest;
	u32 dwidth;
	/*
	 * WARNING: We don't know the direction, it involves we can't
	 * dynamically set the source and dest interface so we have to use the
	 * same one. Only interface 0 allows EBI access. Hopefully we can
	 * access DDR through both ports (at least on SAMA5D4x), so we can use
	 * the same interface for source and dest, that solves the fact we
	 * don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID has to not
	 * match the one of another channel. If not, it could lead to spurious
	 * flag status.
	 * For SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
		| AT_XDMAC_CC_DAM_INCREMENTED_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;
	unsigned long irqflags;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
		__func__, &src, &dest, len, flags);

	if (unlikely(!len))
		return NULL;

	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc *desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}
  1027. static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
  1028. struct at_xdmac_chan *atchan,
  1029. dma_addr_t dst_addr,
  1030. size_t len,
  1031. int value)
  1032. {
  1033. struct at_xdmac_desc *desc;
  1034. unsigned long flags;
  1035. size_t ublen;
  1036. u32 dwidth;
  1037. char pattern;
  1038. /*
  1039. * WARNING: The channel configuration is set here since there is no
  1040. * dmaengine_slave_config call in this case. Moreover we don't know the
  1041. * direction, it involves we can't dynamically set the source and dest
  1042. * interface so we have to use the same one. Only interface 0 allows EBI
  1043. * access. Hopefully we can access DDR through both ports (at least on
  1044. * SAMA5D4x), so we can use the same interface for source and dest,
  1045. * that solves the fact we don't know the direction.
  1046. * ERRATA: Even if useless for memory transfers, the PERID has to not
  1047. * match the one of another channel. If not, it could lead to spurious
  1048. * flag status.
  1049. * For SAMA7G5x case, the SIF and DIF fields are no longer used.
  1050. * Thus, no need to have the SIF/DIF interfaces here.
  1051. * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
  1052. * zero.
  1053. */
  1054. u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
  1055. | AT_XDMAC_CC_DAM_UBS_AM
  1056. | AT_XDMAC_CC_SAM_INCREMENTED_AM
  1057. | AT_XDMAC_CC_MBSIZE_SIXTEEN
  1058. | AT_XDMAC_CC_MEMSET_HW_MODE
  1059. | AT_XDMAC_CC_TYPE_MEM_TRAN;
  1060. dwidth = at_xdmac_align_width(chan, dst_addr);
  1061. if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
  1062. dev_err(chan2dev(chan),
  1063. "%s: Transfer too large, aborting...\n",
  1064. __func__);
  1065. return NULL;
  1066. }
  1067. spin_lock_irqsave(&atchan->lock, flags);
  1068. desc = at_xdmac_get_desc(atchan);
  1069. spin_unlock_irqrestore(&atchan->lock, flags);
  1070. if (!desc) {
  1071. dev_err(chan2dev(chan), "can't get descriptor\n");
  1072. return NULL;
  1073. }
  1074. chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
  1075. /* Only the first byte of value is to be used according to dmaengine */
  1076. pattern = (char)value;
  1077. ublen = len >> dwidth;
  1078. desc->lld.mbr_da = dst_addr;
  1079. desc->lld.mbr_ds = (pattern << 24) |
  1080. (pattern << 16) |
  1081. (pattern << 8) |
  1082. pattern;
  1083. desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
  1084. | AT_XDMAC_MBR_UBC_NDEN
  1085. | AT_XDMAC_MBR_UBC_NSEN
  1086. | ublen;
  1087. desc->lld.mbr_cfg = chan_cc;
  1088. dev_dbg(chan2dev(chan),
  1089. "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
  1090. __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
  1091. desc->lld.mbr_cfg);
  1092. return desc;
  1093. }
  1094. static struct dma_async_tx_descriptor *
  1095. at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
  1096. size_t len, unsigned long flags)
  1097. {
  1098. struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
  1099. struct at_xdmac_desc *desc;
  1100. dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
  1101. __func__, &dest, len, value, flags);
  1102. if (unlikely(!len))
  1103. return NULL;
  1104. desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
  1105. list_add_tail(&desc->desc_node, &desc->descs_list);
  1106. desc->tx_dma_desc.cookie = -EBUSY;
  1107. desc->tx_dma_desc.flags = flags;
  1108. desc->xfer_size = len;
  1109. return &desc->tx_dma_desc;
  1110. }
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, int value,
			    unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc, *pdesc = NULL,
			     *ppdesc = NULL, *first = NULL;
	struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
	size_t stride = 0, pstride = 0, len = 0;
	int i;

	if (!sgl)
		return NULL;

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
		__func__, sg_len, value, flags);

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
			__func__, &sg_dma_address(sg), sg_dma_len(sg),
			value, flags);
		desc = at_xdmac_memset_create_desc(chan, atchan,
						   sg_dma_address(sg),
						   sg_dma_len(sg),
						   value);
		if (!desc && first)
			list_splice_tail_init(&first->descs_list,
					      &atchan->free_descs_list);

		if (!first)
			first = desc;

		/* Update our strides */
		pstride = stride;
		if (psg)
			stride = sg_dma_address(sg) -
				(sg_dma_address(psg) + sg_dma_len(psg));

		/*
		 * The scatterlist API gives us only the address and
		 * length of each element.
		 *
		 * Unfortunately, we don't have the stride, which we
		 * will need to compute.
		 *
		 * That makes us end up in a situation like this one:
		 *    len    stride    len    stride    len
		 * +-------+        +-------+        +-------+
		 * |  N-2  |        |  N-1  |        |   N   |
		 * +-------+        +-------+        +-------+
		 *
		 * We need all these three elements (N-2, N-1 and N)
		 * to actually take the decision on whether we need to
		 * queue N-1 or reuse N-2.
		 *
		 * We will only consider N if it is the last element.
		 */
		if (ppdesc && pdesc) {
			if ((stride == pstride) &&
			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
				dev_dbg(chan2dev(chan),
					"%s: desc 0x%p can be merged with desc 0x%p\n",
					__func__, pdesc, ppdesc);

				/*
				 * Increment the block count of the
				 * N-2 descriptor
				 */
				at_xdmac_increment_block_count(chan, ppdesc);
				ppdesc->lld.mbr_dus = stride;

				/*
				 * Put back the N-1 descriptor in the
				 * free descriptor list
				 */
				list_add_tail(&pdesc->desc_node,
					      &atchan->free_descs_list);

				/*
				 * Make our N-1 descriptor pointer
				 * point to the N-2 since they were
				 * actually merged.
				 */
				pdesc = ppdesc;

			/*
			 * Rule out the case where we don't have
			 * pstride computed yet (our second sg
			 * element).
			 *
			 * We also want to catch the case where there
			 * would be a negative stride.
			 */
			} else if (pstride ||
				   sg_dma_address(sg) < sg_dma_address(psg)) {
				/*
				 * Queue the N-1 descriptor after the
				 * N-2
				 */
				at_xdmac_queue_desc(chan, ppdesc, pdesc);

				/*
				 * Add the N-1 descriptor to the list
				 * of the descriptors used for this
				 * transfer
				 */
				list_add_tail(&desc->desc_node,
					      &first->descs_list);
				dev_dbg(chan2dev(chan),
					"%s: add desc 0x%p to descs_list 0x%p\n",
					__func__, desc, first);
			}
		}

		/*
		 * If we are the last element, just see if we have the
		 * same size as the previous element.
		 *
		 * If so, we can merge it with the previous descriptor
		 * since we don't care about the stride anymore.
		 */
		if ((i == (sg_len - 1)) &&
		    sg_dma_len(psg) == sg_dma_len(sg)) {
			dev_dbg(chan2dev(chan),
				"%s: desc 0x%p can be merged with desc 0x%p\n",
				__func__, desc, pdesc);

			/*
			 * Increment the block count of the N-1
			 * descriptor
			 */
			at_xdmac_increment_block_count(chan, pdesc);
			pdesc->lld.mbr_dus = stride;

			/*
			 * Put back the N descriptor in the free
			 * descriptor list
			 */
			list_add_tail(&desc->desc_node,
				      &atchan->free_descs_list);
		}

		/* Update our descriptors */
		ppdesc = pdesc;
		pdesc = desc;

		/* Update our scatter pointers */
		ppsg = psg;
		psg = sg;

		len += sg_dma_len(sg);
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

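/*
 * device_tx_status hook: report the cookie state and, when a dma_tx_state
 * is provided, compute the residue of the transfer at the head of
 * xfers_list by walking its descriptor list and sampling CNDA/CUBC while
 * the channel is still running.
 */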
static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc, *_desc, *iter;
	struct list_head *descs_list;
	enum dma_status ret;
	int residue, retry;
	u32 cur_nda, check_nda, cur_ubc, mask, value;
	u8 dwidth = 0;
	unsigned long flags;
	bool initd;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		goto spin_unlock;
	}

	residue = desc->xfer_size;
	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. The flush is needed before reading CUBC because data
	 * in the FIFO are not reported by CUBC. Reporting a residue of the
	 * transfer length while data are still in the FIFO can cause issues.
	 * Usecase: an Atmel USART raises a timeout when characters have been
	 * received but nothing new arrives for a while. On timeout, the
	 * driver requests the residue. If the data are still in the DMA
	 * FIFO, we would report a residue of the transfer length, i.e. no
	 * data received. An application waiting for these data would then
	 * hang, since the USART will not raise another timeout without
	 * receiving new data.
	 */
	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * The easiest way to compute the residue would be to pause the DMA,
	 * but doing so can lead to missed data since some devices don't
	 * have a FIFO.
	 * We need to read several registers because:
	 * - the DMA is running, therefore a descriptor change is possible
	 *   while reading these registers
	 * - when the block transfer is done, the value of the CUBC register
	 *   is set to its initial value until the fetch of the next
	 *   descriptor. This value would corrupt the residue calculation, so
	 *   we have to skip it.
	 *
	 *  INITD --------                    ------------
	 *              |____________________|
	 *       _______________________  _______________
	 * NDA       @desc2             \/   @desc3
	 *       _______________________/\_______________
	 *       __________  ___________  _______________
	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
	 *       __________/\___________/\_______________
	 *
	 * Since descriptors are aligned on 64 bits, we can assume that
	 * the update of NDA and CUBC is atomic.
	 * Memory barriers are used to ensure the read order of the registers.
	 * A maximum number of retries is set because, although unlikely, the
	 * loop could otherwise never end.
	 */
	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();
		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
		rmb();
		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
		rmb();
		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();

		if ((check_nda == cur_nda) && initd)
			break;
	}

	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
		ret = DMA_ERROR;
		goto spin_unlock;
	}

	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Another flush is needed here because CUBC is updated
	 * when the controller sends the data write command. Without it we
	 * could report data that are not yet written to memory or to the
	 * device. The FIFO flush ensures that data are really written.
	 */
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * Remove the size of all microblocks already transferred, including
	 * the current one, then add back the remaining size of the current
	 * microblock.
	 */
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
			desc = iter;
			break;
		}
	}
	residue += cur_ubc << dwidth;

	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	return ret;
}

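/*
 * Start the first inactive transfer queued on an idle channel.
 * Called with atchan->lock held.
 */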
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	/*
	 * If the channel is enabled, do nothing: advance_work will be
	 * triggered again after the interrupt.
	 */
	if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
		return;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
	if (!desc->active_xfer)
		at_xdmac_start_xfer(atchan, desc);
}

static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;
	struct dma_async_tx_descriptor *txd;

	spin_lock_irq(&atchan->lock);
	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);
	if (list_empty(&atchan->xfers_list)) {
		spin_unlock_irq(&atchan->lock);
		return;
	}
	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	spin_unlock_irq(&atchan->lock);

	txd = &desc->tx_dma_desc;
	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);
}

/* Called with atchan->lock held. */
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *bad_desc;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to continue with other
	 * descriptors queued (if any).
	 */
	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

	/* Channel must be disabled first as it's not done automatically */
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	bad_desc = list_first_entry(&atchan->xfers_list,
				    struct at_xdmac_desc,
				    xfer_node);

	/* Print bad descriptor's details if needed */
	dev_dbg(chan2dev(&atchan->chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
		bad_desc->lld.mbr_ubc);

	/* Then continue with usual descriptor management */
}

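/*
 * Per-channel tasklet (non-cyclic path): complete the descriptor at the
 * head of xfers_list, invoke its callback, recycle its descriptors and
 * start the next queued transfer. Cyclic transfers only get their callback
 * invoked and are never completed here.
 */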
static void at_xdmac_tasklet(struct tasklet_struct *t)
{
	struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
	struct at_xdmac_desc *desc;
	struct dma_async_tx_descriptor *txd;
	u32 error_mask;

	if (at_xdmac_chan_is_cyclic(atchan))
		return at_xdmac_handle_cyclic(atchan);

	error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
		AT_XDMAC_CIS_ROIS;

	spin_lock_irq(&atchan->lock);

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);

	if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
	    !(atchan->irq_status & error_mask)) {
		spin_unlock_irq(&atchan->lock);
		return;
	}

	if (atchan->irq_status & error_mask)
		at_xdmac_handle_error(atchan);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
	if (!desc->active_xfer) {
		dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
		spin_unlock_irq(&atchan->lock);
		return;
	}

	txd = &desc->tx_dma_desc;
	dma_cookie_complete(txd);
	/* Remove the transfer from the transfer list. */
	list_del(&desc->xfer_node);
	spin_unlock_irq(&atchan->lock);

	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);

	dma_run_dependencies(txd);

	spin_lock_irq(&atchan->lock);
	/* Move the xfer descriptors into the free descriptors list. */
	list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
	at_xdmac_advance_work(atchan);
	spin_unlock_irq(&atchan->lock);
}

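/*
 * Interrupt handler shared by all channels: loop while the global status
 * reports masked pending interrupts, latch each channel's CIS into
 * atchan->irq_status and defer the real work to the channel tasklet.
 * Bus errors disable the offending channel immediately.
 */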
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan *atchan;
	u32 imr, status, pending;
	u32 chan_imr, chan_status;
	int i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->irq_status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}
	} while (pending);

	return ret;
}

static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	unsigned long flags;

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return;
}

static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	int ret;
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return ret;
}

static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	if (!at_xdmac_chan_is_paused(atchan)) {
		spin_unlock_irqrestore(&atchan->lock, flags);
		return 0;
	}

	at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc *desc, *_desc;
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		list_del(&desc->xfer_node);
		list_splice_tail_init(&desc->descs_list,
				      &atchan->free_descs_list);
	}

	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

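/*
 * Pre-allocate init_nr_desc_per_channel descriptors for the channel. A
 * partial allocation is tolerated as long as at least one descriptor could
 * be obtained.
 */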
static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc;
	int i;

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		return -EIO;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		return -EIO;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
		if (!desc) {
			if (i == 0) {
				dev_warn(chan2dev(chan),
					 "can't allocate any descriptors\n");
				return -EIO;
			}
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}

static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}

	return;
}

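/*
 * Program the GCFG/GWAC arbitration registers on controllers whose layout
 * provides axi_config, selecting the mem2mem profile when the
 * "dma-requests" DT property is present and the per2mem profile otherwise.
 * No-op on layouts without axi_config.
 */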
static void at_xdmac_axi_config(struct platform_device *pdev)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	bool dev_m2m = false;
	u32 dma_requests;

	if (!atxdmac->layout->axi_config)
		return; /* Not supported */

	if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
				  &dma_requests)) {
		dev_info(&pdev->dev, "controller in mem2mem mode.\n");
		dev_m2m = true;
	}

	if (dev_m2m) {
		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
	} else {
		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
	}
}

static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

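/*
 * System suspend: save the per-channel configuration (plus CIM/CNDA/CNDC
 * for cyclic channels, which are paused rather than stopped) and the
 * global interrupt mask and status, then switch the controller and its
 * clock off. atmel_xdmac_resume() restores this mirrored state.
 */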
static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause(chan);
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);

	at_xdmac_off(atxdmac);
	clk_disable_unprepare(atxdmac->clk);
	return 0;
}

static int __maybe_unused atmel_xdmac_resume(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan, *_chan;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
	int i;
	int ret;

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret)
		return ret;

	at_xdmac_axi_config(pdev);

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_resume(chan);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			wmb();
			if (atxdmac->save_gs & atchan->mask)
				at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}
	return 0;
}

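/*
 * Probe: map the registers, read the channel count from GTYPE, request the
 * shared IRQ, create the descriptor DMA pool, register the dmaengine
 * capabilities, callbacks and channels, then expose the controller through
 * the OF DMA helpers.
 */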
static int at_xdmac_probe(struct platform_device *pdev)
{
	struct at_xdmac *atxdmac;
	int irq, nr_channels, i, ret;
	void __iomem *base;
	u32 reg;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Read the number of xdmac channels. The read helper can't be used
	 * since atxdmac is not yet allocated, and we need the channel count
	 * to size that allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}

	atxdmac = devm_kzalloc(&pdev->dev,
			       struct_size(atxdmac, chan, nr_channels),
			       GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;

	atxdmac->layout = of_device_get_match_data(&pdev->dev);
	if (!atxdmac->layout)
		return -ENODEV;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use dev res to prevent races with tasklet */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				 sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}

	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel, second allocation fails in private_candidate.
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev = &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status = at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
	atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config = at_xdmac_device_config;
	atxdmac->dma.device_pause = at_xdmac_device_pause;
	atxdmac->dma.device_resume = at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/* Disable all chans and interrupts. */
	at_xdmac_off(atxdmac);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}
	platform_set_drvdata(pdev, atxdmac);

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "fail to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register of dma controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	at_xdmac_axi_config(pdev);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac);
	return ret;
}

static int at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int i;

	at_xdmac_off(atxdmac);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	clk_disable_unprepare(atxdmac->clk);
	free_irq(atxdmac->irq, atxdmac);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}

	return 0;
}

static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
	.prepare	= atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
};

static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
		.data = &at_xdmac_sama5d4_layout,
	}, {
		.compatible = "microchip,sama7g5-dma",
		.data = &at_xdmac_sama7g5_layout,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);

static struct platform_driver at_xdmac_driver = {
	.probe		= at_xdmac_probe,
	.remove		= at_xdmac_remove,
	.driver = {
		.name		= "at_xdmac",
		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
		.pm		= pm_ptr(&atmel_xdmac_dev_pm_ops),
	}
};

static int __init at_xdmac_init(void)
{
	return platform_driver_register(&at_xdmac_driver);
}
subsys_initcall(at_xdmac_init);

static void __exit at_xdmac_exit(void)
{
	platform_driver_unregister(&at_xdmac_driver);
}
module_exit(at_xdmac_exit);

MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <[email protected]>");
MODULE_LICENSE("GPL");