  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Cadence NAND flash controller driver
  4. *
  5. * Copyright (C) 2019 Cadence
  6. *
  7. * Author: Piotr Sroka <[email protected]>
  8. */
  9. #include <linux/bitfield.h>
  10. #include <linux/clk.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/dmaengine.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/module.h>
  15. #include <linux/mtd/mtd.h>
  16. #include <linux/mtd/rawnand.h>
  17. #include <linux/of_device.h>
  18. #include <linux/iopoll.h>
  19. #include <linux/slab.h>
  20. /*
  21. * HPNFC can work in 3 modes:
  22. * - PIO - can work in master or slave DMA
  23. * - CDMA - needs Master DMA for accessing command descriptors.
  24. * - Generic mode - can use only slave DMA.
  25. * CDMA and PIO modes can be used to execute only base commands.
   26. * Generic mode can be used to execute any command
   27. * on the NAND flash memory. The driver uses CDMA mode for
   28. * block erasing, page reading and page programming.
   29. * Generic mode is used for executing the rest of the commands.
  30. */
  31. #define MAX_ADDRESS_CYC 6
  32. #define MAX_ERASE_ADDRESS_CYC 3
  33. #define MAX_DATA_SIZE 0xFFFC
  34. #define DMA_DATA_SIZE_ALIGN 8
  35. /* Register definition. */
  36. /*
  37. * Command register 0.
  38. * Writing data to this register will initiate a new transaction
  39. * of the NF controller.
  40. */
  41. #define CMD_REG0 0x0000
  42. /* Command type field mask. */
  43. #define CMD_REG0_CT GENMASK(31, 30)
  44. /* Command type CDMA. */
  45. #define CMD_REG0_CT_CDMA 0uL
  46. /* Command type generic. */
  47. #define CMD_REG0_CT_GEN 3uL
  48. /* Command thread number field mask. */
  49. #define CMD_REG0_TN GENMASK(27, 24)
  50. /* Command register 2. */
  51. #define CMD_REG2 0x0008
  52. /* Command register 3. */
  53. #define CMD_REG3 0x000C
   54. /* Pointer register selecting the thread whose status will be reported. */
  55. #define CMD_STATUS_PTR 0x0010
  56. /* Command status register for selected thread. */
  57. #define CMD_STATUS 0x0014
  58. /* Interrupt status register. */
  59. #define INTR_STATUS 0x0110
  60. #define INTR_STATUS_SDMA_ERR BIT(22)
  61. #define INTR_STATUS_SDMA_TRIGG BIT(21)
  62. #define INTR_STATUS_UNSUPP_CMD BIT(19)
  63. #define INTR_STATUS_DDMA_TERR BIT(18)
  64. #define INTR_STATUS_CDMA_TERR BIT(17)
  65. #define INTR_STATUS_CDMA_IDL BIT(16)
  66. /* Interrupt enable register. */
  67. #define INTR_ENABLE 0x0114
  68. #define INTR_ENABLE_INTR_EN BIT(31)
  69. #define INTR_ENABLE_SDMA_ERR_EN BIT(22)
  70. #define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
  71. #define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
  72. #define INTR_ENABLE_DDMA_TERR_EN BIT(18)
  73. #define INTR_ENABLE_CDMA_TERR_EN BIT(17)
  74. #define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
  75. /* Controller internal state. */
  76. #define CTRL_STATUS 0x0118
  77. #define CTRL_STATUS_INIT_COMP BIT(9)
  78. #define CTRL_STATUS_CTRL_BUSY BIT(8)
  79. /* Command Engine threads state. */
  80. #define TRD_STATUS 0x0120
  81. /* Command Engine interrupt thread error status. */
  82. #define TRD_ERR_INT_STATUS 0x0128
  83. /* Command Engine interrupt thread error enable. */
  84. #define TRD_ERR_INT_STATUS_EN 0x0130
  85. /* Command Engine interrupt thread complete status. */
  86. #define TRD_COMP_INT_STATUS 0x0138
  87. /*
  88. * Transfer config 0 register.
  89. * Configures data transfer parameters.
  90. */
  91. #define TRAN_CFG_0 0x0400
  92. /* Offset value from the beginning of the page. */
  93. #define TRAN_CFG_0_OFFSET GENMASK(31, 16)
   94. /* Number of sectors to transfer within a single NF device's page. */
  95. #define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
  96. /*
  97. * Transfer config 1 register.
  98. * Configures data transfer parameters.
  99. */
  100. #define TRAN_CFG_1 0x0404
  101. /* Size of last data sector. */
  102. #define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
  103. /* Size of not-last data sector. */
  104. #define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
  105. /* ECC engine configuration register 0. */
  106. #define ECC_CONFIG_0 0x0428
  107. /* Correction strength. */
  108. #define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
  109. /* Enable erased pages detection mechanism. */
  110. #define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
  111. /* Enable controller ECC check bits generation and correction. */
  112. #define ECC_CONFIG_0_ECC_EN BIT(0)
  113. /* ECC engine configuration register 1. */
  114. #define ECC_CONFIG_1 0x042C
  115. /* Multiplane settings register. */
  116. #define MULTIPLANE_CFG 0x0434
  117. /* Cache operation settings. */
  118. #define CACHE_CFG 0x0438
  119. /* DMA settings register. */
  120. #define DMA_SETINGS 0x043C
   121. /* Enable SDMA error reporting on access to an unprepared slave DMA interface. */
  122. #define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
  123. /* Transferred data block size for the slave DMA module. */
  124. #define SDMA_SIZE 0x0440
  125. /* Thread number associated with transferred data block
  126. * for the slave DMA module.
  127. */
  128. #define SDMA_TRD_NUM 0x0444
  129. /* Thread number mask. */
  130. #define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
  131. #define CONTROL_DATA_CTRL 0x0494
   132. /* Control data size mask. */
  133. #define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
  134. #define CTRL_VERSION 0x800
  135. #define CTRL_VERSION_REV GENMASK(7, 0)
  136. /* Available hardware features of the controller. */
  137. #define CTRL_FEATURES 0x804
  138. /* Support for NV-DDR2/3 work mode. */
  139. #define CTRL_FEATURES_NVDDR_2_3 BIT(28)
  140. /* Support for NV-DDR work mode. */
  141. #define CTRL_FEATURES_NVDDR BIT(27)
  142. /* Support for asynchronous work mode. */
  143. #define CTRL_FEATURES_ASYNC BIT(26)
   144. /* Number of banks supported by the hardware. */
  145. #define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
   146. /* Slave and Master DMA data width (64-bit when set, 32-bit otherwise). */
   147. #define CTRL_FEATURES_DMA_DWITH64 BIT(21)
   148. /* Availability of Control Data feature. */
  149. #define CTRL_FEATURES_CONTROL_DATA BIT(10)
  150. /* BCH Engine identification register 0 - correction strengths. */
  151. #define BCH_CFG_0 0x838
  152. #define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
  153. #define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
  154. #define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
  155. #define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
  156. /* BCH Engine identification register 1 - correction strengths. */
  157. #define BCH_CFG_1 0x83C
  158. #define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
  159. #define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
  160. #define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
  161. #define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
  162. /* BCH Engine identification register 2 - sector sizes. */
  163. #define BCH_CFG_2 0x840
  164. #define BCH_CFG_2_SECT_0 GENMASK(15, 0)
  165. #define BCH_CFG_2_SECT_1 GENMASK(31, 16)
  166. /* BCH Engine identification register 3. */
  167. #define BCH_CFG_3 0x844
  168. #define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
  169. /* Ready/Busy# line status. */
  170. #define RBN_SETINGS 0x1004
  171. /* Common settings. */
  172. #define COMMON_SET 0x1008
  173. /* 16 bit device connected to the NAND Flash interface. */
  174. #define COMMON_SET_DEVICE_16BIT BIT(8)
  175. /* Skip_bytes registers. */
  176. #define SKIP_BYTES_CONF 0x100C
  177. #define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
  178. #define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
  179. #define SKIP_BYTES_OFFSET 0x1010
  180. #define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
  181. /* Timings configuration. */
  182. #define ASYNC_TOGGLE_TIMINGS 0x101c
  183. #define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
  184. #define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
  185. #define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
  186. #define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
  187. #define TIMINGS0 0x1024
  188. #define TIMINGS0_TADL GENMASK(31, 24)
  189. #define TIMINGS0_TCCS GENMASK(23, 16)
  190. #define TIMINGS0_TWHR GENMASK(15, 8)
  191. #define TIMINGS0_TRHW GENMASK(7, 0)
  192. #define TIMINGS1 0x1028
  193. #define TIMINGS1_TRHZ GENMASK(31, 24)
  194. #define TIMINGS1_TWB GENMASK(23, 16)
  195. #define TIMINGS1_TVDLY GENMASK(7, 0)
  196. #define TIMINGS2 0x102c
  197. #define TIMINGS2_TFEAT GENMASK(25, 16)
  198. #define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
  199. #define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
   200. /* Configuration of the resynchronization of the slave DLL of the PHY. */
  201. #define DLL_PHY_CTRL 0x1034
  202. #define DLL_PHY_CTRL_DLL_RST_N BIT(24)
  203. #define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
  204. #define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
  205. #define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
  206. #define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
  207. /* Register controlling DQ related timing. */
  208. #define PHY_DQ_TIMING 0x2000
   209. /* Register controlling DQS related timing. */
  210. #define PHY_DQS_TIMING 0x2004
  211. #define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
  212. #define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
  213. #define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
  214. /* Register controlling the gate and loopback control related timing. */
  215. #define PHY_GATE_LPBK_CTRL 0x2008
  216. #define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
  217. /* Register holds the control for the master DLL logic. */
  218. #define PHY_DLL_MASTER_CTRL 0x200C
  219. #define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
  220. /* Register holds the control for the slave DLL logic. */
  221. #define PHY_DLL_SLAVE_CTRL 0x2010
  222. /* This register handles the global control settings for the PHY. */
  223. #define PHY_CTRL 0x2080
  224. #define PHY_CTRL_SDR_DQS BIT(14)
  225. #define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
  226. /*
  227. * This register handles the global control settings
  228. * for the termination selects for reads.
  229. */
  230. #define PHY_TSEL 0x2084
  231. /* Generic command layout. */
  232. #define GCMD_LAY_CS GENMASK_ULL(11, 8)
  233. /*
   234. * This bit informs the minicontroller whether it has to wait for tWB
  235. * after sending the last CMD/ADDR/DATA in the sequence.
  236. */
  237. #define GCMD_LAY_TWB BIT_ULL(6)
  238. /* Type of generic instruction. */
  239. #define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
  240. /* Generic CMD sequence type. */
  241. #define GCMD_LAY_INSTR_CMD 0
  242. /* Generic ADDR sequence type. */
  243. #define GCMD_LAY_INSTR_ADDR 1
  244. /* Generic data transfer sequence type. */
  245. #define GCMD_LAY_INSTR_DATA 2
   246. /* Command opcode field of a generic command (used when the instruction type is CMD). */
  247. #define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
  248. /* Generic command address sequence - address fields. */
  249. #define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
  250. /* Generic command address sequence - address size. */
  251. #define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
  252. /* Transfer direction field of generic command data sequence. */
  253. #define GCMD_DIR BIT_ULL(11)
  254. /* Read transfer direction of generic command data sequence. */
  255. #define GCMD_DIR_READ 0
  256. /* Write transfer direction of generic command data sequence. */
  257. #define GCMD_DIR_WRITE 1
   258. /* ECC enable flag of generic command data sequence. */
  259. #define GCMD_ECC_EN BIT_ULL(12)
  260. /* Generic command data sequence - sector size. */
  261. #define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
  262. /* Generic command data sequence - sector count. */
  263. #define GCMD_SECT_CNT GENMASK_ULL(39, 32)
  264. /* Generic command data sequence - last sector size. */
  265. #define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
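/*
 * Illustrative sketch only (not part of the register specification): a
 * generic DATA instruction reading a single full sector with ECC disabled
 * could be composed from the fields above roughly as
 *
 *   u64 gcmd = FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_DATA) |
 *              FIELD_PREP(GCMD_DIR, GCMD_DIR_READ) |
 *              FIELD_PREP(GCMD_SECT_CNT, 1) |
 *              FIELD_PREP(GCMD_SECT_SIZE, sector_size) |
 *              FIELD_PREP(GCMD_LAST_SIZE, sector_size);
 *
 * where sector_size is a placeholder value. Such 64-bit words are handed to
 * cadence_nand_generic_cmd_send(), which splits them across CMD_REG2/CMD_REG3.
 */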
  266. /* CDMA descriptor fields. */
  267. /* Erase command type of CDMA descriptor. */
  268. #define CDMA_CT_ERASE 0x1000
  269. /* Program page command type of CDMA descriptor. */
  270. #define CDMA_CT_WR 0x2100
  271. /* Read page command type of CDMA descriptor. */
  272. #define CDMA_CT_RD 0x2200
  273. /* Flash pointer memory shift. */
  274. #define CDMA_CFPTR_MEM_SHIFT 24
  275. /* Flash pointer memory mask. */
  276. #define CDMA_CFPTR_MEM GENMASK(26, 24)
  277. /*
   278. * Command DMA descriptor flag. If set, an interrupt is issued after
   279. * the completion of descriptor processing.
  280. */
  281. #define CDMA_CF_INT BIT(8)
  282. /*
  283. * Command DMA descriptor flags - the next descriptor
  284. * address field is valid and descriptor processing should continue.
  285. */
  286. #define CDMA_CF_CONT BIT(9)
  287. /* DMA master flag of command DMA descriptor. */
  288. #define CDMA_CF_DMA_MASTER BIT(10)
  289. /* Operation complete status of command descriptor. */
  290. #define CDMA_CS_COMP BIT(15)
  292. /* Command descriptor status - operation fail. */
  293. #define CDMA_CS_FAIL BIT(14)
  294. /* Command descriptor status - page erased. */
  295. #define CDMA_CS_ERP BIT(11)
  296. /* Command descriptor status - timeout occurred. */
  297. #define CDMA_CS_TOUT BIT(10)
  298. /*
  299. * Maximum amount of correction applied to one ECC sector.
  300. * It is part of command descriptor status.
  301. */
  302. #define CDMA_CS_MAXERR GENMASK(9, 2)
  303. /* Command descriptor status - uncorrectable ECC error. */
  304. #define CDMA_CS_UNCE BIT(1)
  305. /* Command descriptor status - descriptor error. */
  306. #define CDMA_CS_ERR BIT(0)
  307. /* Status of operation - OK. */
  308. #define STAT_OK 0
  309. /* Status of operation - FAIL. */
  310. #define STAT_FAIL 2
  311. /* Status of operation - uncorrectable ECC error. */
  312. #define STAT_ECC_UNCORR 3
  313. /* Status of operation - page erased. */
  314. #define STAT_ERASED 5
  315. /* Status of operation - correctable ECC error. */
  316. #define STAT_ECC_CORR 6
   317. /* Status of operation - unexpected state. */
  318. #define STAT_UNKNOWN 7
  319. /* Status of operation - operation is not completed yet. */
  320. #define STAT_BUSY 0xFF
  321. #define BCH_MAX_NUM_CORR_CAPS 8
  322. #define BCH_MAX_NUM_SECTOR_SIZES 2
  323. struct cadence_nand_timings {
  324. u32 async_toggle_timings;
  325. u32 timings0;
  326. u32 timings1;
  327. u32 timings2;
  328. u32 dll_phy_ctrl;
  329. u32 phy_ctrl;
  330. u32 phy_dqs_timing;
  331. u32 phy_gate_lpbk_ctrl;
  332. };
  333. /* Command DMA descriptor. */
  334. struct cadence_nand_cdma_desc {
  335. /* Next descriptor address. */
  336. u64 next_pointer;
   337. /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
  338. u32 flash_pointer;
   339. /* Field appears in HPNFC version 13. */
  340. u16 bank;
  341. u16 rsvd0;
  342. /* Operation the controller needs to perform. */
  343. u16 command_type;
  344. u16 rsvd1;
  345. /* Flags for operation of this command. */
  346. u16 command_flags;
  347. u16 rsvd2;
  348. /* System/host memory address required for data DMA commands. */
  349. u64 memory_pointer;
  350. /* Status of operation. */
  351. u32 status;
  352. u32 rsvd3;
  353. /* Address pointer to sync buffer location. */
  354. u64 sync_flag_pointer;
  355. /* Controls the buffer sync mechanism. */
  356. u32 sync_arguments;
  357. u32 rsvd4;
  358. /* Control data pointer. */
  359. u64 ctrl_data_ptr;
  360. };
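/*
 * Note: the controller fetches this descriptor directly from system memory
 * (cdns_ctrl->cdma_desc / dma_cdma_desc), so it lives in DMA-coherent memory
 * that is presumably allocated during driver initialization. This driver
 * issues a single descriptor at a time; next_pointer and the sync fields are
 * left at zero by cadence_nand_cdma_desc_prepare().
 */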
  361. /* Interrupt status. */
  362. struct cadence_nand_irq_status {
  363. /* Thread operation complete status. */
  364. u32 trd_status;
  365. /* Thread operation error. */
  366. u32 trd_error;
  367. /* Controller status. */
  368. u32 status;
  369. };
   370. /* Cadence NAND flash controller capabilities obtained from driver data. */
  371. struct cadence_nand_dt_devdata {
  372. /* Skew value of the output signals of the NAND Flash interface. */
  373. u32 if_skew;
   374. /* Indicates whether the slave DMA interface is connected to a DMA engine. */
  375. unsigned int has_dma:1;
  376. };
  377. /* Cadence NAND flash controller capabilities read from registers. */
  378. struct cdns_nand_caps {
  379. /* Maximum number of banks supported by hardware. */
  380. u8 max_banks;
  381. /* Slave and Master DMA data width in bytes (4 or 8). */
  382. u8 data_dma_width;
  383. /* Control Data feature supported. */
  384. bool data_control_supp;
  385. /* Is PHY type DLL. */
  386. bool is_phy_type_dll;
  387. };
  388. struct cdns_nand_ctrl {
  389. struct device *dev;
  390. struct nand_controller controller;
  391. struct cadence_nand_cdma_desc *cdma_desc;
  392. /* IP capability. */
  393. const struct cadence_nand_dt_devdata *caps1;
  394. struct cdns_nand_caps caps2;
  395. u8 ctrl_rev;
  396. dma_addr_t dma_cdma_desc;
  397. u8 *buf;
  398. u32 buf_size;
  399. u8 curr_corr_str_idx;
  400. /* Register interface. */
  401. void __iomem *reg;
  402. struct {
  403. void __iomem *virt;
  404. dma_addr_t dma;
  405. } io;
  406. int irq;
  407. /* Interrupts that have happened. */
  408. struct cadence_nand_irq_status irq_status;
  409. /* Interrupts we are waiting for. */
  410. struct cadence_nand_irq_status irq_mask;
  411. struct completion complete;
  412. /* Protect irq_mask and irq_status. */
  413. spinlock_t irq_lock;
  414. int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
  415. struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
  416. struct nand_ecc_caps ecc_caps;
  417. int curr_trans_type;
  418. struct dma_chan *dmac;
  419. u32 nf_clk_rate;
  420. /*
  421. * Estimated Board delay. The value includes the total
  422. * round trip delay for the signals and is used for deciding on values
  423. * associated with data read capture.
  424. */
  425. u32 board_delay;
  426. struct nand_chip *selected_chip;
  427. unsigned long assigned_cs;
  428. struct list_head chips;
  429. u8 bch_metadata_size;
  430. };
  431. struct cdns_nand_chip {
  432. struct cadence_nand_timings timings;
  433. struct nand_chip chip;
  434. u8 nsels;
  435. struct list_head node;
  436. /*
   437. * Part of the OOB area of the NAND flash memory page.
   438. * This part is available for the user to read or write.
  439. */
  440. u32 avail_oob_size;
   441. /* Sector size. There are a few sectors per mtd->writesize. */
  442. u32 sector_size;
  443. u32 sector_count;
  444. /* Offset of BBM. */
  445. u8 bbm_offs;
  446. /* Number of bytes reserved for BBM. */
  447. u8 bbm_len;
  448. /* ECC strength index. */
  449. u8 corr_str_idx;
  450. u8 cs[];
  451. };
  452. struct ecc_info {
  453. int (*calc_ecc_bytes)(int step_size, int strength);
  454. int max_step_size;
  455. };
  456. static inline struct
  457. cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
  458. {
  459. return container_of(chip, struct cdns_nand_chip, chip);
  460. }
  461. static inline struct
  462. cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
  463. {
  464. return container_of(controller, struct cdns_nand_ctrl, controller);
  465. }
  466. static bool
  467. cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
  468. u32 buf_len)
  469. {
  470. u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
  471. return buf && virt_addr_valid(buf) &&
  472. likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
  473. likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
  474. }
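/*
 * Poll a controller register until the bits in @mask are all cleared
 * (@is_clear == true) or until at least one of them is set
 * (@is_clear == false). Returns 0 on success or -ETIMEDOUT once
 * @timeout_us microseconds have elapsed.
 */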
  475. static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
  476. u32 reg_offset, u32 timeout_us,
  477. u32 mask, bool is_clear)
  478. {
  479. u32 val;
  480. int ret;
  481. ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
  482. val, !(val & mask) == is_clear,
  483. 10, timeout_us);
  484. if (ret < 0) {
  485. dev_err(cdns_ctrl->dev,
  486. "Timeout while waiting for reg %x with mask %x is clear %d\n",
  487. reg_offset, mask, is_clear);
  488. }
  489. return ret;
  490. }
  491. static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
  492. bool enable)
  493. {
  494. u32 reg;
  495. if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
  496. 1000000,
  497. CTRL_STATUS_CTRL_BUSY, true))
  498. return -ETIMEDOUT;
  499. reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
  500. if (enable)
  501. reg |= ECC_CONFIG_0_ECC_EN;
  502. else
  503. reg &= ~ECC_CONFIG_0_ECC_EN;
  504. writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
  505. return 0;
  506. }
  507. static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
  508. u8 corr_str_idx)
  509. {
  510. u32 reg;
  511. if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
  512. return;
  513. reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
  514. reg &= ~ECC_CONFIG_0_CORR_STR;
  515. reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
  516. writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
  517. cdns_ctrl->curr_corr_str_idx = corr_str_idx;
  518. }
  519. static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
  520. u8 strength)
  521. {
  522. int i, corr_str_idx = -1;
  523. for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
  524. if (cdns_ctrl->ecc_strengths[i] == strength) {
  525. corr_str_idx = i;
  526. break;
  527. }
  528. }
  529. return corr_str_idx;
  530. }
  531. static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
  532. u16 marker_value)
  533. {
  534. u32 reg;
  535. if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
  536. 1000000,
  537. CTRL_STATUS_CTRL_BUSY, true))
  538. return -ETIMEDOUT;
  539. reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
  540. reg &= ~SKIP_BYTES_MARKER_VALUE;
  541. reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
  542. marker_value);
  543. writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
  544. return 0;
  545. }
  546. static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
  547. u8 num_of_bytes,
  548. u32 offset_value,
  549. int enable)
  550. {
  551. u32 reg, skip_bytes_offset;
  552. if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
  553. 1000000,
  554. CTRL_STATUS_CTRL_BUSY, true))
  555. return -ETIMEDOUT;
  556. if (!enable) {
  557. num_of_bytes = 0;
  558. offset_value = 0;
  559. }
  560. reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
  561. reg &= ~SKIP_BYTES_NUM_OF_BYTES;
  562. reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
  563. num_of_bytes);
  564. skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
  565. offset_value);
  566. writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
  567. writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
  568. return 0;
  569. }
   570. /* Function enables/disables hardware detection of erased data. */
  571. static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
  572. bool enable,
  573. u8 bitflips_threshold)
  574. {
  575. u32 reg;
  576. reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
  577. if (enable)
  578. reg |= ECC_CONFIG_0_ERASE_DET_EN;
  579. else
  580. reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
  581. writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
  582. writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
  583. }
  584. static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
  585. bool bit_bus16)
  586. {
  587. u32 reg;
  588. if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
  589. 1000000,
  590. CTRL_STATUS_CTRL_BUSY, true))
  591. return -ETIMEDOUT;
  592. reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
  593. if (!bit_bus16)
  594. reg &= ~COMMON_SET_DEVICE_16BIT;
  595. else
  596. reg |= COMMON_SET_DEVICE_16BIT;
  597. writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
  598. return 0;
  599. }
  600. static void
  601. cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
  602. struct cadence_nand_irq_status *irq_status)
  603. {
  604. writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
  605. writel_relaxed(irq_status->trd_status,
  606. cdns_ctrl->reg + TRD_COMP_INT_STATUS);
  607. writel_relaxed(irq_status->trd_error,
  608. cdns_ctrl->reg + TRD_ERR_INT_STATUS);
  609. }
  610. static void
  611. cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
  612. struct cadence_nand_irq_status *irq_status)
  613. {
  614. irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
  615. irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
  616. + TRD_COMP_INT_STATUS);
  617. irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
  618. + TRD_ERR_INT_STATUS);
  619. }
  620. static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
  621. struct cadence_nand_irq_status *irq_status)
  622. {
  623. cadence_nand_read_int_status(cdns_ctrl, irq_status);
  624. return irq_status->status || irq_status->trd_status ||
  625. irq_status->trd_error;
  626. }
  627. static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
  628. {
  629. unsigned long flags;
  630. spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
  631. memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
  632. memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
  633. spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
  634. }
  635. /*
  636. * This is the interrupt service routine. It handles all interrupts
  637. * sent to this device.
  638. */
  639. static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
  640. {
  641. struct cdns_nand_ctrl *cdns_ctrl = dev_id;
  642. struct cadence_nand_irq_status irq_status;
  643. irqreturn_t result = IRQ_NONE;
  644. spin_lock(&cdns_ctrl->irq_lock);
  645. if (irq_detected(cdns_ctrl, &irq_status)) {
  646. /* Handle interrupt. */
  647. /* First acknowledge it. */
  648. cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
  649. /* Status in the device context for someone to read. */
  650. cdns_ctrl->irq_status.status |= irq_status.status;
  651. cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
  652. cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
  653. /* Notify anyone who cares that it happened. */
  654. complete(&cdns_ctrl->complete);
  655. /* Tell the OS that we've handled this. */
  656. result = IRQ_HANDLED;
  657. }
  658. spin_unlock(&cdns_ctrl->irq_lock);
  659. return result;
  660. }
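/*
 * Typical interrupt usage pattern in this driver: a caller fills a
 * cadence_nand_irq_status structure with the bits it cares about, enables
 * them with cadence_nand_set_irq_mask(), starts the operation, and then
 * blocks in cadence_nand_wait_for_irq() until the ISR above records the
 * status and signals the completion.
 */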
  661. static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
  662. struct cadence_nand_irq_status *irq_mask)
  663. {
  664. writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
  665. cdns_ctrl->reg + INTR_ENABLE);
  666. writel_relaxed(irq_mask->trd_error,
  667. cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
  668. }
  669. static void
  670. cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
  671. struct cadence_nand_irq_status *irq_mask,
  672. struct cadence_nand_irq_status *irq_status)
  673. {
  674. unsigned long timeout = msecs_to_jiffies(10000);
  675. unsigned long time_left;
  676. time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
  677. timeout);
  678. *irq_status = cdns_ctrl->irq_status;
  679. if (time_left == 0) {
  680. /* Timeout error. */
  681. dev_err(cdns_ctrl->dev, "timeout occurred:\n");
  682. dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
  683. irq_status->status, irq_mask->status);
  684. dev_err(cdns_ctrl->dev,
  685. "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
  686. irq_status->trd_status, irq_mask->trd_status);
  687. dev_err(cdns_ctrl->dev,
  688. "\t trd_error = 0x%x, trd_error mask = 0x%x\n",
  689. irq_status->trd_error, irq_mask->trd_error);
  690. }
  691. }
  692. /* Execute generic command on NAND controller. */
  693. static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
  694. u8 chip_nr,
  695. u64 mini_ctrl_cmd)
  696. {
  697. u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
  698. mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
  699. mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
  700. mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
  701. if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
  702. 1000000,
  703. CTRL_STATUS_CTRL_BUSY, true))
  704. return -ETIMEDOUT;
  705. cadence_nand_reset_irq(cdns_ctrl);
  706. writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
  707. writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
  708. /* Select generic command. */
  709. reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
  710. /* Thread number. */
  711. reg |= FIELD_PREP(CMD_REG0_TN, 0);
  712. /* Issue command. */
  713. writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
  714. return 0;
  715. }
  716. /* Wait for data on slave DMA interface. */
  717. static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
  718. u8 *out_sdma_trd,
  719. u32 *out_sdma_size)
  720. {
  721. struct cadence_nand_irq_status irq_mask, irq_status;
  722. irq_mask.trd_status = 0;
  723. irq_mask.trd_error = 0;
  724. irq_mask.status = INTR_STATUS_SDMA_TRIGG
  725. | INTR_STATUS_SDMA_ERR
  726. | INTR_STATUS_UNSUPP_CMD;
  727. cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
  728. cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
  729. if (irq_status.status == 0) {
  730. dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
  731. return -ETIMEDOUT;
  732. }
  733. if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
  734. *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
  735. *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
  736. *out_sdma_trd =
  737. FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
  738. } else {
  739. dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
  740. irq_status.status);
  741. return -EIO;
  742. }
  743. return 0;
  744. }
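/*
 * Note: INTR_STATUS_SDMA_TRIGG only signals that the controller has exposed
 * a data block on the slave DMA window (cdns_ctrl->io); the data itself is
 * then moved by the CPU or, when available, by an external DMA channel
 * (cdns_ctrl->dmac) in the transfer helpers later in this file.
 */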
  745. static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
  746. {
  747. u32 reg;
  748. reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
  749. cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
  750. if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
  751. cdns_ctrl->caps2.data_dma_width = 8;
  752. else
  753. cdns_ctrl->caps2.data_dma_width = 4;
  754. if (reg & CTRL_FEATURES_CONTROL_DATA)
  755. cdns_ctrl->caps2.data_control_supp = true;
  756. if (reg & (CTRL_FEATURES_NVDDR_2_3
  757. | CTRL_FEATURES_NVDDR))
  758. cdns_ctrl->caps2.is_phy_type_dll = true;
  759. }
  760. /* Prepare CDMA descriptor. */
  761. static void
  762. cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
  763. char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
  764. dma_addr_t ctrl_data_ptr, u16 ctype)
  765. {
  766. struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
  767. memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
  768. /* Set fields for one descriptor. */
  769. cdma_desc->flash_pointer = flash_ptr;
  770. if (cdns_ctrl->ctrl_rev >= 13)
  771. cdma_desc->bank = nf_mem;
  772. else
  773. cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
  774. cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
  775. cdma_desc->command_flags |= CDMA_CF_INT;
  776. cdma_desc->memory_pointer = mem_ptr;
  777. cdma_desc->status = 0;
  778. cdma_desc->sync_flag_pointer = 0;
  779. cdma_desc->sync_arguments = 0;
  780. cdma_desc->command_type = ctype;
  781. cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
  782. }
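/*
 * The prepared descriptor always carries CDMA_CF_INT (interrupt on
 * completion) and CDMA_CF_DMA_MASTER, and never CDMA_CF_CONT, so the
 * controller processes exactly one descriptor per command. On controller
 * revisions older than 13 the bank number is packed into flash_pointer
 * instead of the dedicated bank field.
 */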
  783. static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
  784. u32 desc_status)
  785. {
  786. if (desc_status & CDMA_CS_ERP)
  787. return STAT_ERASED;
  788. if (desc_status & CDMA_CS_UNCE)
  789. return STAT_ECC_UNCORR;
  790. if (desc_status & CDMA_CS_ERR) {
  791. dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
  792. return STAT_FAIL;
  793. }
  794. if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
  795. return STAT_ECC_CORR;
  796. return STAT_FAIL;
  797. }
  798. static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
  799. {
  800. struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
  801. u8 status = STAT_BUSY;
  802. if (desc_ptr->status & CDMA_CS_FAIL) {
  803. status = cadence_nand_check_desc_error(cdns_ctrl,
  804. desc_ptr->status);
  805. dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
  806. } else if (desc_ptr->status & CDMA_CS_COMP) {
  807. /* Descriptor finished with no errors. */
  808. if (desc_ptr->command_flags & CDMA_CF_CONT) {
  809. dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
  810. status = STAT_UNKNOWN;
  811. } else {
  812. /* Last descriptor. */
  813. status = STAT_OK;
  814. }
  815. }
  816. return status;
  817. }
  818. static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
  819. u8 thread)
  820. {
  821. u32 reg;
  822. int status;
  823. /* Wait for thread ready. */
  824. status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
  825. 1000000,
  826. BIT(thread), true);
  827. if (status)
  828. return status;
  829. cadence_nand_reset_irq(cdns_ctrl);
  830. reinit_completion(&cdns_ctrl->complete);
  831. writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
  832. cdns_ctrl->reg + CMD_REG2);
  833. writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
  834. /* Select CDMA mode. */
  835. reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
  836. /* Thread number. */
  837. reg |= FIELD_PREP(CMD_REG0_TN, thread);
  838. /* Issue command. */
  839. writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
  840. return 0;
  841. }
   842. /* Send CDMA command and wait for finish. */
   843. static int
  844. cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
  845. u8 thread)
  846. {
  847. struct cadence_nand_irq_status irq_mask, irq_status = {0};
  848. int status;
  849. irq_mask.trd_status = BIT(thread);
  850. irq_mask.trd_error = BIT(thread);
  851. irq_mask.status = INTR_STATUS_CDMA_TERR;
  852. cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
  853. status = cadence_nand_cdma_send(cdns_ctrl, thread);
  854. if (status)
  855. return status;
  856. cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
  857. if (irq_status.status == 0 && irq_status.trd_status == 0 &&
  858. irq_status.trd_error == 0) {
  859. dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
  860. return -ETIMEDOUT;
  861. }
  862. if (irq_status.status & irq_mask.status) {
  863. dev_err(cdns_ctrl->dev, "CDMA command failed\n");
  864. return -EIO;
  865. }
  866. return 0;
  867. }
  868. /*
   869. * ECC size depends on the configured ECC strength and on the maximum
   870. * supported ECC step size.
  871. */
  872. static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
  873. {
  874. int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
  875. return ALIGN(nbytes, 2);
  876. }
  877. #define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
  878. static int \
  879. cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
  880. int strength)\
  881. {\
  882. return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
  883. }
  884. CADENCE_NAND_CALC_ECC_BYTES(256)
  885. CADENCE_NAND_CALC_ECC_BYTES(512)
  886. CADENCE_NAND_CALC_ECC_BYTES(1024)
  887. CADENCE_NAND_CALC_ECC_BYTES(2048)
  888. CADENCE_NAND_CALC_ECC_BYTES(4096)
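/*
 * Worked example for the formula above: for a 1024-byte step and a
 * correction strength of 8 bits, fls(8 * 1024) = 14 (effectively the BCH
 * Galois field order m), so nbytes = DIV_ROUND_UP(14 * 8, 8) = 14, which is
 * already 2-byte aligned, giving 14 ECC bytes per sector.
 */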
  889. /* Function reads BCH capabilities. */
  890. static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
  891. {
  892. struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
  893. int max_step_size = 0, nstrengths, i;
  894. u32 reg;
  895. reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
  896. cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
  897. if (cdns_ctrl->bch_metadata_size < 4) {
  898. dev_err(cdns_ctrl->dev,
  899. "Driver needs at least 4 bytes of BCH meta data\n");
  900. return -EIO;
  901. }
  902. reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
  903. cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
  904. cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
  905. cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
  906. cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
  907. reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
  908. cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
  909. cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
  910. cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
  911. cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
  912. reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
  913. cdns_ctrl->ecc_stepinfos[0].stepsize =
  914. FIELD_GET(BCH_CFG_2_SECT_0, reg);
  915. cdns_ctrl->ecc_stepinfos[1].stepsize =
  916. FIELD_GET(BCH_CFG_2_SECT_1, reg);
  917. nstrengths = 0;
  918. for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
  919. if (cdns_ctrl->ecc_strengths[i] != 0)
  920. nstrengths++;
  921. }
  922. ecc_caps->nstepinfos = 0;
  923. for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
  924. /* ECC strengths are common for all step infos. */
  925. cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
  926. cdns_ctrl->ecc_stepinfos[i].strengths =
  927. cdns_ctrl->ecc_strengths;
  928. if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
  929. ecc_caps->nstepinfos++;
  930. if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
  931. max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
  932. }
  933. ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
  934. switch (max_step_size) {
  935. case 256:
  936. ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
  937. break;
  938. case 512:
  939. ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
  940. break;
  941. case 1024:
  942. ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
  943. break;
  944. case 2048:
  945. ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
  946. break;
  947. case 4096:
  948. ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
  949. break;
  950. default:
  951. dev_err(cdns_ctrl->dev,
  952. "Unsupported sector size(ecc step size) %d\n",
  953. max_step_size);
  954. return -EIO;
  955. }
  956. return 0;
  957. }
  958. /* Hardware initialization. */
  959. static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
  960. {
  961. int status;
  962. u32 reg;
  963. status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
  964. 1000000,
  965. CTRL_STATUS_INIT_COMP, false);
  966. if (status)
  967. return status;
  968. reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
  969. cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
  970. dev_info(cdns_ctrl->dev,
  971. "%s: cadence nand controller version reg %x\n",
  972. __func__, reg);
  973. /* Disable cache and multiplane. */
  974. writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
  975. writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
  976. /* Clear all interrupts. */
  977. writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
  978. cadence_nand_get_caps(cdns_ctrl);
  979. if (cadence_nand_read_bch_caps(cdns_ctrl))
  980. return -EIO;
  981. /*
   982. * Set IO width access to 8 bits.
   983. * During SW device discovery the access width is expected
   984. * to be 8 bits.
  985. */
  986. status = cadence_nand_set_access_width16(cdns_ctrl, false);
  987. return status;
  988. }
  989. #define TT_MAIN_OOB_AREAS 2
  990. #define TT_RAW_PAGE 3
  991. #define TT_BBM 4
  992. #define TT_MAIN_OOB_AREA_EXT 5
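/*
 * Transfer types used by cadence_nand_prepare_data_size() below:
 * - TT_MAIN_OOB_AREAS:    ECC sectors of the main area, with the available
 *                         OOB appended to the last sector.
 * - TT_RAW_PAGE:          whole page plus OOB transferred as one raw sector.
 * - TT_BBM:               only the bad block marker bytes in the OOB area.
 * - TT_MAIN_OOB_AREA_EXT: main area sectors, with the OOB passed through the
 *                         separate control-data channel.
 */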
  993. /* Prepare size of data to transfer. */
  994. static void
  995. cadence_nand_prepare_data_size(struct nand_chip *chip,
  996. int transfer_type)
  997. {
  998. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  999. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1000. struct mtd_info *mtd = nand_to_mtd(chip);
  1001. u32 sec_size = 0, offset = 0, sec_cnt = 1;
  1002. u32 last_sec_size = cdns_chip->sector_size;
  1003. u32 data_ctrl_size = 0;
  1004. u32 reg = 0;
  1005. if (cdns_ctrl->curr_trans_type == transfer_type)
  1006. return;
  1007. switch (transfer_type) {
  1008. case TT_MAIN_OOB_AREA_EXT:
  1009. sec_cnt = cdns_chip->sector_count;
  1010. sec_size = cdns_chip->sector_size;
  1011. data_ctrl_size = cdns_chip->avail_oob_size;
  1012. break;
  1013. case TT_MAIN_OOB_AREAS:
  1014. sec_cnt = cdns_chip->sector_count;
  1015. last_sec_size = cdns_chip->sector_size
  1016. + cdns_chip->avail_oob_size;
  1017. sec_size = cdns_chip->sector_size;
  1018. break;
  1019. case TT_RAW_PAGE:
  1020. last_sec_size = mtd->writesize + mtd->oobsize;
  1021. break;
  1022. case TT_BBM:
  1023. offset = mtd->writesize + cdns_chip->bbm_offs;
  1024. last_sec_size = 8;
  1025. break;
  1026. }
  1027. reg = 0;
  1028. reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
  1029. reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
  1030. writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
  1031. reg = 0;
  1032. reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
  1033. reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
  1034. writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
  1035. if (cdns_ctrl->caps2.data_control_supp) {
  1036. reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
  1037. reg &= ~CONTROL_DATA_CTRL_SIZE;
  1038. reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
  1039. writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
  1040. }
  1041. cdns_ctrl->curr_trans_type = transfer_type;
  1042. }
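/*
 * Map the data (and optional control-data) buffer for DMA, prepare a read or
 * write CDMA descriptor for it, run it on the thread matching the chip
 * number, and translate the descriptor status into one of the STAT_* codes
 * via cadence_nand_cdma_finish().
 */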
  1043. static int
  1044. cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
  1045. int page, void *buf, void *ctrl_dat, u32 buf_size,
  1046. u32 ctrl_dat_size, enum dma_data_direction dir,
  1047. bool with_ecc)
  1048. {
  1049. dma_addr_t dma_buf, dma_ctrl_dat = 0;
  1050. u8 thread_nr = chip_nr;
  1051. int status;
  1052. u16 ctype;
  1053. if (dir == DMA_FROM_DEVICE)
  1054. ctype = CDMA_CT_RD;
  1055. else
  1056. ctype = CDMA_CT_WR;
  1057. cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
  1058. dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
  1059. if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
  1060. dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
  1061. return -EIO;
  1062. }
  1063. if (ctrl_dat && ctrl_dat_size) {
  1064. dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
  1065. ctrl_dat_size, dir);
  1066. if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
  1067. dma_unmap_single(cdns_ctrl->dev, dma_buf,
  1068. buf_size, dir);
  1069. dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
  1070. return -EIO;
  1071. }
  1072. }
  1073. cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
  1074. dma_buf, dma_ctrl_dat, ctype);
  1075. status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
  1076. dma_unmap_single(cdns_ctrl->dev, dma_buf,
  1077. buf_size, dir);
  1078. if (ctrl_dat && ctrl_dat_size)
  1079. dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
  1080. ctrl_dat_size, dir);
  1081. if (status)
  1082. return status;
  1083. return cadence_nand_cdma_finish(cdns_ctrl);
  1084. }
  1085. static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
  1086. struct cadence_nand_timings *t)
  1087. {
  1088. writel_relaxed(t->async_toggle_timings,
  1089. cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
  1090. writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
  1091. writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
  1092. writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
  1093. if (cdns_ctrl->caps2.is_phy_type_dll)
  1094. writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
  1095. writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
  1096. if (cdns_ctrl->caps2.is_phy_type_dll) {
  1097. writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
  1098. writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
  1099. writel_relaxed(t->phy_dqs_timing,
  1100. cdns_ctrl->reg + PHY_DQS_TIMING);
  1101. writel_relaxed(t->phy_gate_lpbk_ctrl,
  1102. cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
  1103. writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
  1104. cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
  1105. writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
  1106. }
  1107. }
  1108. static int cadence_nand_select_target(struct nand_chip *chip)
  1109. {
  1110. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1111. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1112. if (chip == cdns_ctrl->selected_chip)
  1113. return 0;
  1114. if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
  1115. 1000000,
  1116. CTRL_STATUS_CTRL_BUSY, true))
  1117. return -ETIMEDOUT;
  1118. cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
  1119. cadence_nand_set_ecc_strength(cdns_ctrl,
  1120. cdns_chip->corr_str_idx);
  1121. cadence_nand_set_erase_detection(cdns_ctrl, true,
  1122. chip->ecc.strength);
  1123. cdns_ctrl->curr_trans_type = -1;
  1124. cdns_ctrl->selected_chip = chip;
  1125. return 0;
  1126. }
  1127. static int cadence_nand_erase(struct nand_chip *chip, u32 page)
  1128. {
  1129. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1130. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1131. int status;
  1132. u8 thread_nr = cdns_chip->cs[chip->cur_cs];
  1133. cadence_nand_cdma_desc_prepare(cdns_ctrl,
  1134. cdns_chip->cs[chip->cur_cs],
  1135. page, 0, 0,
  1136. CDMA_CT_ERASE);
  1137. status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
  1138. if (status) {
  1139. dev_err(cdns_ctrl->dev, "erase operation failed\n");
  1140. return -EIO;
  1141. }
  1142. status = cadence_nand_cdma_finish(cdns_ctrl);
  1143. if (status)
  1144. return status;
  1145. return 0;
  1146. }
  1147. static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
  1148. {
  1149. int status;
  1150. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1151. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1152. struct mtd_info *mtd = nand_to_mtd(chip);
  1153. cadence_nand_prepare_data_size(chip, TT_BBM);
  1154. cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
  1155. /*
   1156. * Read only the bad block marker from the offset
   1157. * defined by the memory manufacturer.
  1158. */
  1159. status = cadence_nand_cdma_transfer(cdns_ctrl,
  1160. cdns_chip->cs[chip->cur_cs],
  1161. page, cdns_ctrl->buf, NULL,
  1162. mtd->oobsize,
  1163. 0, DMA_FROM_DEVICE, false);
  1164. if (status) {
  1165. dev_err(cdns_ctrl->dev, "read BBM failed\n");
  1166. return -EIO;
  1167. }
  1168. memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
  1169. return 0;
  1170. }
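/*
 * Program a page with hardware ECC. If the caller's buffer is DMA-able and
 * the controller supports separate data/control transfers, data and OOB are
 * sent directly; otherwise the internal bounce buffer is used.
 */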
  1171. static int cadence_nand_write_page(struct nand_chip *chip,
  1172. const u8 *buf, int oob_required,
  1173. int page)
  1174. {
  1175. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1176. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1177. struct mtd_info *mtd = nand_to_mtd(chip);
  1178. int status;
  1179. u16 marker_val = 0xFFFF;
  1180. status = cadence_nand_select_target(chip);
  1181. if (status)
  1182. return status;
  1183. cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
  1184. mtd->writesize
  1185. + cdns_chip->bbm_offs,
  1186. 1);
  1187. if (oob_required) {
  1188. marker_val = *(u16 *)(chip->oob_poi
  1189. + cdns_chip->bbm_offs);
  1190. } else {
  1191. /* Set oob data to 0xFF. */
  1192. memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
  1193. cdns_chip->avail_oob_size);
  1194. }
  1195. cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
  1196. cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
  1197. if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
  1198. cdns_ctrl->caps2.data_control_supp) {
  1199. u8 *oob;
  1200. if (oob_required)
  1201. oob = chip->oob_poi;
  1202. else
  1203. oob = cdns_ctrl->buf + mtd->writesize;
  1204. status = cadence_nand_cdma_transfer(cdns_ctrl,
  1205. cdns_chip->cs[chip->cur_cs],
  1206. page, (void *)buf, oob,
  1207. mtd->writesize,
  1208. cdns_chip->avail_oob_size,
  1209. DMA_TO_DEVICE, true);
  1210. if (status) {
  1211. dev_err(cdns_ctrl->dev, "write page failed\n");
  1212. return -EIO;
  1213. }
  1214. return 0;
  1215. }
  1216. if (oob_required) {
  1217. /* Transfer the data to the oob area. */
  1218. memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
  1219. cdns_chip->avail_oob_size);
  1220. }
  1221. memcpy(cdns_ctrl->buf, buf, mtd->writesize);
  1222. cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
  1223. return cadence_nand_cdma_transfer(cdns_ctrl,
  1224. cdns_chip->cs[chip->cur_cs],
  1225. page, cdns_ctrl->buf, NULL,
  1226. mtd->writesize
  1227. + cdns_chip->avail_oob_size,
  1228. 0, DMA_TO_DEVICE, true);
  1229. }
  1230. static int cadence_nand_write_oob(struct nand_chip *chip, int page)
  1231. {
  1232. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1233. struct mtd_info *mtd = nand_to_mtd(chip);
  1234. memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
  1235. return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
  1236. }
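/*
 * Raw page write: rearrange the caller's data and OOB into the on-flash
 * syndrome layout (payload and ECC bytes interleaved per ECC step) inside the
 * bounce buffer, then program the main and OOB areas in a single transfer.
 */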
  1237. static int cadence_nand_write_page_raw(struct nand_chip *chip,
  1238. const u8 *buf, int oob_required,
  1239. int page)
  1240. {
  1241. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1242. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1243. struct mtd_info *mtd = nand_to_mtd(chip);
  1244. int writesize = mtd->writesize;
  1245. int oobsize = mtd->oobsize;
  1246. int ecc_steps = chip->ecc.steps;
  1247. int ecc_size = chip->ecc.size;
  1248. int ecc_bytes = chip->ecc.bytes;
  1249. void *tmp_buf = cdns_ctrl->buf;
  1250. int oob_skip = cdns_chip->bbm_len;
  1251. size_t size = writesize + oobsize;
  1252. int i, pos, len;
  1253. int status = 0;
  1254. status = cadence_nand_select_target(chip);
  1255. if (status)
  1256. return status;
	/*
	 * Fill the buffer with 0xff first, except for a full-page transfer.
	 * This simplifies the logic.
	 */
  1261. if (!buf || !oob_required)
  1262. memset(tmp_buf, 0xff, size);
  1263. cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
  1264. /* Arrange the buffer for syndrome payload/ecc layout. */
  1265. if (buf) {
  1266. for (i = 0; i < ecc_steps; i++) {
  1267. pos = i * (ecc_size + ecc_bytes);
  1268. len = ecc_size;
  1269. if (pos >= writesize)
  1270. pos += oob_skip;
  1271. else if (pos + len > writesize)
  1272. len = writesize - pos;
  1273. memcpy(tmp_buf + pos, buf, len);
  1274. buf += len;
  1275. if (len < ecc_size) {
  1276. len = ecc_size - len;
  1277. memcpy(tmp_buf + writesize + oob_skip, buf,
  1278. len);
  1279. buf += len;
  1280. }
  1281. }
  1282. }
  1283. if (oob_required) {
  1284. const u8 *oob = chip->oob_poi;
  1285. u32 oob_data_offset = (cdns_chip->sector_count - 1) *
  1286. (cdns_chip->sector_size + chip->ecc.bytes)
  1287. + cdns_chip->sector_size + oob_skip;
  1288. /* BBM at the beginning of the OOB area. */
  1289. memcpy(tmp_buf + writesize, oob, oob_skip);
  1290. /* OOB free. */
  1291. memcpy(tmp_buf + oob_data_offset, oob,
  1292. cdns_chip->avail_oob_size);
  1293. oob += cdns_chip->avail_oob_size;
  1294. /* OOB ECC. */
  1295. for (i = 0; i < ecc_steps; i++) {
  1296. pos = ecc_size + i * (ecc_size + ecc_bytes);
  1297. if (i == (ecc_steps - 1))
  1298. pos += cdns_chip->avail_oob_size;
  1299. len = ecc_bytes;
  1300. if (pos >= writesize)
  1301. pos += oob_skip;
  1302. else if (pos + len > writesize)
  1303. len = writesize - pos;
  1304. memcpy(tmp_buf + pos, oob, len);
  1305. oob += len;
  1306. if (len < ecc_bytes) {
  1307. len = ecc_bytes - len;
  1308. memcpy(tmp_buf + writesize + oob_skip, oob,
  1309. len);
  1310. oob += len;
  1311. }
  1312. }
  1313. }
  1314. cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
  1315. return cadence_nand_cdma_transfer(cdns_ctrl,
  1316. cdns_chip->cs[chip->cur_cs],
  1317. page, cdns_ctrl->buf, NULL,
  1318. mtd->writesize +
  1319. mtd->oobsize,
  1320. 0, DMA_TO_DEVICE, false);
  1321. }
  1322. static int cadence_nand_write_oob_raw(struct nand_chip *chip,
  1323. int page)
  1324. {
  1325. return cadence_nand_write_page_raw(chip, NULL, true, page);
  1326. }
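/*
 * Read a page with hardware ECC. ECC statistics are updated from the CDMA
 * descriptor status and the maximum number of corrected bitflips is returned.
 */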
  1327. static int cadence_nand_read_page(struct nand_chip *chip,
  1328. u8 *buf, int oob_required, int page)
  1329. {
  1330. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1331. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1332. struct mtd_info *mtd = nand_to_mtd(chip);
  1333. int status = 0;
  1334. int ecc_err_count = 0;
  1335. status = cadence_nand_select_target(chip);
  1336. if (status)
  1337. return status;
  1338. cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
  1339. mtd->writesize
  1340. + cdns_chip->bbm_offs, 1);
	/*
	 * If the data buffer can be accessed by DMA and the data_control
	 * feature is supported, then transfer the data and OOB directly.
	 */
  1345. if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
  1346. cdns_ctrl->caps2.data_control_supp) {
  1347. u8 *oob;
  1348. if (oob_required)
  1349. oob = chip->oob_poi;
  1350. else
  1351. oob = cdns_ctrl->buf + mtd->writesize;
  1352. cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
  1353. status = cadence_nand_cdma_transfer(cdns_ctrl,
  1354. cdns_chip->cs[chip->cur_cs],
  1355. page, buf, oob,
  1356. mtd->writesize,
  1357. cdns_chip->avail_oob_size,
  1358. DMA_FROM_DEVICE, true);
  1359. /* Otherwise use bounce buffer. */
  1360. } else {
  1361. cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
  1362. status = cadence_nand_cdma_transfer(cdns_ctrl,
  1363. cdns_chip->cs[chip->cur_cs],
  1364. page, cdns_ctrl->buf,
  1365. NULL, mtd->writesize
  1366. + cdns_chip->avail_oob_size,
  1367. 0, DMA_FROM_DEVICE, true);
  1368. memcpy(buf, cdns_ctrl->buf, mtd->writesize);
  1369. if (oob_required)
  1370. memcpy(chip->oob_poi,
  1371. cdns_ctrl->buf + mtd->writesize,
  1372. mtd->oobsize);
  1373. }
  1374. switch (status) {
  1375. case STAT_ECC_UNCORR:
  1376. mtd->ecc_stats.failed++;
  1377. ecc_err_count++;
  1378. break;
  1379. case STAT_ECC_CORR:
  1380. ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
  1381. cdns_ctrl->cdma_desc->status);
  1382. mtd->ecc_stats.corrected += ecc_err_count;
  1383. break;
  1384. case STAT_ERASED:
  1385. case STAT_OK:
  1386. break;
  1387. default:
  1388. dev_err(cdns_ctrl->dev, "read page failed\n");
  1389. return -EIO;
  1390. }
  1391. if (oob_required)
  1392. if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
  1393. return -EIO;
  1394. return ecc_err_count;
  1395. }
  1396. /* Reads OOB data from the device. */
  1397. static int cadence_nand_read_oob(struct nand_chip *chip, int page)
  1398. {
  1399. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1400. return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
  1401. }
  1402. static int cadence_nand_read_page_raw(struct nand_chip *chip,
  1403. u8 *buf, int oob_required, int page)
  1404. {
  1405. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1406. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1407. struct mtd_info *mtd = nand_to_mtd(chip);
  1408. int oob_skip = cdns_chip->bbm_len;
  1409. int writesize = mtd->writesize;
  1410. int ecc_steps = chip->ecc.steps;
  1411. int ecc_size = chip->ecc.size;
  1412. int ecc_bytes = chip->ecc.bytes;
  1413. void *tmp_buf = cdns_ctrl->buf;
  1414. int i, pos, len;
  1415. int status = 0;
  1416. status = cadence_nand_select_target(chip);
  1417. if (status)
  1418. return status;
  1419. cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
  1420. cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
  1421. status = cadence_nand_cdma_transfer(cdns_ctrl,
  1422. cdns_chip->cs[chip->cur_cs],
  1423. page, cdns_ctrl->buf, NULL,
  1424. mtd->writesize
  1425. + mtd->oobsize,
  1426. 0, DMA_FROM_DEVICE, false);
  1427. switch (status) {
  1428. case STAT_ERASED:
  1429. case STAT_OK:
  1430. break;
  1431. default:
  1432. dev_err(cdns_ctrl->dev, "read raw page failed\n");
  1433. return -EIO;
  1434. }
  1435. /* Arrange the buffer for syndrome payload/ecc layout. */
  1436. if (buf) {
  1437. for (i = 0; i < ecc_steps; i++) {
  1438. pos = i * (ecc_size + ecc_bytes);
  1439. len = ecc_size;
  1440. if (pos >= writesize)
  1441. pos += oob_skip;
  1442. else if (pos + len > writesize)
  1443. len = writesize - pos;
  1444. memcpy(buf, tmp_buf + pos, len);
  1445. buf += len;
  1446. if (len < ecc_size) {
  1447. len = ecc_size - len;
  1448. memcpy(buf, tmp_buf + writesize + oob_skip,
  1449. len);
  1450. buf += len;
  1451. }
  1452. }
  1453. }
  1454. if (oob_required) {
  1455. u8 *oob = chip->oob_poi;
  1456. u32 oob_data_offset = (cdns_chip->sector_count - 1) *
  1457. (cdns_chip->sector_size + chip->ecc.bytes)
  1458. + cdns_chip->sector_size + oob_skip;
  1459. /* OOB free. */
  1460. memcpy(oob, tmp_buf + oob_data_offset,
  1461. cdns_chip->avail_oob_size);
  1462. /* BBM at the beginning of the OOB area. */
  1463. memcpy(oob, tmp_buf + writesize, oob_skip);
  1464. oob += cdns_chip->avail_oob_size;
  1465. /* OOB ECC */
  1466. for (i = 0; i < ecc_steps; i++) {
  1467. pos = ecc_size + i * (ecc_size + ecc_bytes);
  1468. len = ecc_bytes;
  1469. if (i == (ecc_steps - 1))
  1470. pos += cdns_chip->avail_oob_size;
  1471. if (pos >= writesize)
  1472. pos += oob_skip;
  1473. else if (pos + len > writesize)
  1474. len = writesize - pos;
  1475. memcpy(oob, tmp_buf + pos, len);
  1476. oob += len;
  1477. if (len < ecc_bytes) {
  1478. len = ecc_bytes - len;
  1479. memcpy(oob, tmp_buf + writesize + oob_skip,
  1480. len);
  1481. oob += len;
  1482. }
  1483. }
  1484. }
  1485. return 0;
  1486. }
  1487. static int cadence_nand_read_oob_raw(struct nand_chip *chip,
  1488. int page)
  1489. {
  1490. return cadence_nand_read_page_raw(chip, NULL, true, page);
  1491. }
  1492. static void cadence_nand_slave_dma_transfer_finished(void *data)
  1493. {
  1494. struct completion *finished = data;
  1495. complete(finished);
  1496. }
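/*
 * Move @len bytes between @buf and the controller's slave DMA interface using
 * an external DMA channel (memcpy transfer). Returns -EIO on failure so the
 * caller can fall back to CPU I/O.
 */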
static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
					   void *buf,
					   dma_addr_t dev_dma, size_t len,
					   enum dma_data_direction dir)
{
	DECLARE_COMPLETION_ONSTACK(finished);
	struct dma_chan *chan;
	struct dma_device *dma_dev;
	dma_addr_t src_dma, dst_dma, buf_dma;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	chan = cdns_ctrl->dmac;
	dma_dev = chan->device;

	buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
	if (dma_mapping_error(dma_dev->dev, buf_dma)) {
		dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
		goto err;
	}

	if (dir == DMA_FROM_DEVICE) {
		src_dma = cdns_ctrl->io.dma;
		dst_dma = buf_dma;
	} else {
		src_dma = buf_dma;
		dst_dma = cdns_ctrl->io.dma;
	}

	tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
		goto err_unmap;
	}

	tx->callback = cadence_nand_slave_dma_transfer_finished;
	tx->callback_param = &finished;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(cdns_ctrl->dmac);
	wait_for_completion(&finished);

	/* Unmap with the same device that was used for the mapping. */
	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);

	return 0;

err_unmap:
	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);

err:
	dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");

	return -EIO;
}
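/*
 * Read @len bytes from the slave DMA interface: CPU I/O (ioread32_rep) when no
 * DMA engine is available, otherwise the DMA helper with a bounce-buffer
 * fallback.
 */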
static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
				 u8 *buf, int len)
{
	u8 thread_nr = 0;
	u32 sdma_size;
	int status;

	/* Wait until the slave DMA interface is ready for data transfer. */
	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
	if (status)
		return status;

	if (!cdns_ctrl->caps1->has_dma) {
		int len_in_words = len >> 2;

		/* Read the 32-bit aligned part of the data. */
		ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);

		if (sdma_size > len) {
			/* Read the remaining data from the slave DMA interface, if any. */
			ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
				     sdma_size / 4 - len_in_words);

			/* Copy the rest of the data. */
			memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
			       len - (len_in_words << 2));
		}
		return 0;
	}

	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
		status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
							 cdns_ctrl->io.dma,
							 len, DMA_FROM_DEVICE);
		if (status == 0)
			return 0;

		dev_warn(cdns_ctrl->dev,
			 "Slave DMA transfer failed. Try again using bounce buffer.");
	}

	/* If DMA transfer is not possible or failed then use bounce buffer. */
	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
						 cdns_ctrl->io.dma,
						 sdma_size, DMA_FROM_DEVICE);

	if (status) {
		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
		return status;
	}

	memcpy(buf, cdns_ctrl->buf, len);

	return 0;
}
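/*
 * Write @len bytes to the slave DMA interface, mirroring cadence_nand_read_buf:
 * CPU I/O when no DMA engine is available, otherwise the DMA helper with a
 * bounce-buffer fallback.
 */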
static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
				  const u8 *buf, int len)
{
	u8 thread_nr = 0;
	u32 sdma_size;
	int status;

	/* Wait until the slave DMA interface is ready for data transfer. */
	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
	if (status)
		return status;

	if (!cdns_ctrl->caps1->has_dma) {
		int len_in_words = len >> 2;

		/* Write the 32-bit aligned part of the data. */
		iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);

		if (sdma_size > len) {
			/* Copy the rest of the data. */
			memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
			       len - (len_in_words << 2));

			/* Write all data expected by the NAND controller. */
			iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
				      sdma_size / 4 - len_in_words);
		}
		return 0;
	}

	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
		status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
							 cdns_ctrl->io.dma,
							 len, DMA_TO_DEVICE);
		if (status == 0)
			return 0;

		dev_warn(cdns_ctrl->dev,
			 "Slave DMA transfer failed. Try again using bounce buffer.");
	}

	/* If DMA transfer is not possible or failed then use bounce buffer. */
	memcpy(cdns_ctrl->buf, buf, len);

	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
						 cdns_ctrl->io.dma,
						 sdma_size, DMA_TO_DEVICE);

	if (status)
		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");

	return status;
}
static int cadence_nand_force_byte_access(struct nand_chip *chip,
					  bool force_8bit)
{
	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);

	/*
	 * Callers of this function do not verify if the NAND is using a 16-bit
	 * or an 8-bit bus for normal operations, so we need to take care of
	 * that here by leaving the configuration unchanged if the NAND does
	 * not have the NAND_BUSWIDTH_16 flag set.
	 */
	if (!(chip->options & NAND_BUSWIDTH_16))
		return 0;

	return cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
}
  1644. static int cadence_nand_cmd_opcode(struct nand_chip *chip,
  1645. const struct nand_subop *subop)
  1646. {
  1647. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1648. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1649. const struct nand_op_instr *instr;
  1650. unsigned int op_id = 0;
  1651. u64 mini_ctrl_cmd = 0;
  1652. int ret;
  1653. instr = &subop->instrs[op_id];
  1654. if (instr->delay_ns > 0)
  1655. mini_ctrl_cmd |= GCMD_LAY_TWB;
  1656. mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
  1657. GCMD_LAY_INSTR_CMD);
  1658. mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
  1659. instr->ctx.cmd.opcode);
  1660. ret = cadence_nand_generic_cmd_send(cdns_ctrl,
  1661. cdns_chip->cs[chip->cur_cs],
  1662. mini_ctrl_cmd);
  1663. if (ret)
  1664. dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
  1665. instr->ctx.cmd.opcode);
  1666. return ret;
  1667. }
  1668. static int cadence_nand_cmd_address(struct nand_chip *chip,
  1669. const struct nand_subop *subop)
  1670. {
  1671. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1672. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1673. const struct nand_op_instr *instr;
  1674. unsigned int op_id = 0;
  1675. u64 mini_ctrl_cmd = 0;
  1676. unsigned int offset, naddrs;
  1677. u64 address = 0;
  1678. const u8 *addrs;
  1679. int ret;
  1680. int i;
  1681. instr = &subop->instrs[op_id];
  1682. if (instr->delay_ns > 0)
  1683. mini_ctrl_cmd |= GCMD_LAY_TWB;
  1684. mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
  1685. GCMD_LAY_INSTR_ADDR);
  1686. offset = nand_subop_get_addr_start_off(subop, op_id);
  1687. naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
  1688. addrs = &instr->ctx.addr.addrs[offset];
  1689. for (i = 0; i < naddrs; i++)
  1690. address |= (u64)addrs[i] << (8 * i);
  1691. mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
  1692. address);
  1693. mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
  1694. naddrs - 1);
  1695. ret = cadence_nand_generic_cmd_send(cdns_ctrl,
  1696. cdns_chip->cs[chip->cur_cs],
  1697. mini_ctrl_cmd);
  1698. if (ret)
  1699. dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
  1700. return ret;
  1701. }
  1702. static int cadence_nand_cmd_erase(struct nand_chip *chip,
  1703. const struct nand_subop *subop)
  1704. {
  1705. unsigned int op_id;
  1706. if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
  1707. int i;
  1708. const struct nand_op_instr *instr = NULL;
  1709. unsigned int offset, naddrs;
  1710. const u8 *addrs;
  1711. u32 page = 0;
  1712. instr = &subop->instrs[1];
  1713. offset = nand_subop_get_addr_start_off(subop, 1);
  1714. naddrs = nand_subop_get_num_addr_cyc(subop, 1);
  1715. addrs = &instr->ctx.addr.addrs[offset];
  1716. for (i = 0; i < naddrs; i++)
  1717. page |= (u32)addrs[i] << (8 * i);
  1718. return cadence_nand_erase(chip, page);
  1719. }
	/*
	 * If it is not an erase operation, handle the operation
	 * by calling the exec_op function.
	 */
  1724. for (op_id = 0; op_id < subop->ninstrs; op_id++) {
  1725. int ret;
  1726. const struct nand_operation nand_op = {
  1727. .cs = chip->cur_cs,
  1728. .instrs = &subop->instrs[op_id],
  1729. .ninstrs = 1};
  1730. ret = chip->controller->ops->exec_op(chip, &nand_op, false);
  1731. if (ret)
  1732. return ret;
  1733. }
  1734. return 0;
  1735. }
  1736. static int cadence_nand_cmd_data(struct nand_chip *chip,
  1737. const struct nand_subop *subop)
  1738. {
  1739. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1740. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1741. const struct nand_op_instr *instr;
  1742. unsigned int offset, op_id = 0;
  1743. u64 mini_ctrl_cmd = 0;
  1744. int len = 0;
  1745. int ret;
  1746. instr = &subop->instrs[op_id];
  1747. if (instr->delay_ns > 0)
  1748. mini_ctrl_cmd |= GCMD_LAY_TWB;
  1749. mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
  1750. GCMD_LAY_INSTR_DATA);
  1751. if (instr->type == NAND_OP_DATA_OUT_INSTR)
  1752. mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
  1753. GCMD_DIR_WRITE);
  1754. len = nand_subop_get_data_len(subop, op_id);
  1755. offset = nand_subop_get_data_start_off(subop, op_id);
  1756. mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
  1757. mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
  1758. if (instr->ctx.data.force_8bit) {
  1759. ret = cadence_nand_force_byte_access(chip, true);
  1760. if (ret) {
  1761. dev_err(cdns_ctrl->dev,
  1762. "cannot change byte access generic data cmd failed\n");
  1763. return ret;
  1764. }
  1765. }
  1766. ret = cadence_nand_generic_cmd_send(cdns_ctrl,
  1767. cdns_chip->cs[chip->cur_cs],
  1768. mini_ctrl_cmd);
  1769. if (ret) {
  1770. dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
  1771. return ret;
  1772. }
  1773. if (instr->type == NAND_OP_DATA_IN_INSTR) {
  1774. void *buf = instr->ctx.data.buf.in + offset;
  1775. ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
  1776. } else {
  1777. const void *buf = instr->ctx.data.buf.out + offset;
  1778. ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
  1779. }
  1780. if (ret) {
  1781. dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
  1782. return ret;
  1783. }
  1784. if (instr->ctx.data.force_8bit) {
  1785. ret = cadence_nand_force_byte_access(chip, false);
  1786. if (ret) {
  1787. dev_err(cdns_ctrl->dev,
  1788. "cannot change byte access generic data cmd failed\n");
  1789. }
  1790. }
  1791. return ret;
  1792. }
  1793. static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
  1794. const struct nand_subop *subop)
  1795. {
  1796. int status;
  1797. unsigned int op_id = 0;
  1798. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1799. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1800. const struct nand_op_instr *instr = &subop->instrs[op_id];
  1801. u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
  1802. status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
  1803. timeout_us,
  1804. BIT(cdns_chip->cs[chip->cur_cs]),
  1805. false);
  1806. return status;
  1807. }
  1808. static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
  1809. NAND_OP_PARSER_PATTERN(
  1810. cadence_nand_cmd_erase,
  1811. NAND_OP_PARSER_PAT_CMD_ELEM(false),
  1812. NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
  1813. NAND_OP_PARSER_PAT_CMD_ELEM(false),
  1814. NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
  1815. NAND_OP_PARSER_PATTERN(
  1816. cadence_nand_cmd_opcode,
  1817. NAND_OP_PARSER_PAT_CMD_ELEM(false)),
  1818. NAND_OP_PARSER_PATTERN(
  1819. cadence_nand_cmd_address,
  1820. NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
  1821. NAND_OP_PARSER_PATTERN(
  1822. cadence_nand_cmd_data,
  1823. NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
  1824. NAND_OP_PARSER_PATTERN(
  1825. cadence_nand_cmd_data,
  1826. NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
  1827. NAND_OP_PARSER_PATTERN(
  1828. cadence_nand_cmd_waitrdy,
  1829. NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
  1830. );
  1831. static int cadence_nand_exec_op(struct nand_chip *chip,
  1832. const struct nand_operation *op,
  1833. bool check_only)
  1834. {
  1835. if (!check_only) {
  1836. int status = cadence_nand_select_target(chip);
  1837. if (status)
  1838. return status;
  1839. }
  1840. return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
  1841. check_only);
  1842. }
  1843. static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
  1844. struct mtd_oob_region *oobregion)
  1845. {
  1846. struct nand_chip *chip = mtd_to_nand(mtd);
  1847. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1848. if (section)
  1849. return -ERANGE;
  1850. oobregion->offset = cdns_chip->bbm_len;
  1851. oobregion->length = cdns_chip->avail_oob_size
  1852. - cdns_chip->bbm_len;
  1853. return 0;
  1854. }
  1855. static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
  1856. struct mtd_oob_region *oobregion)
  1857. {
  1858. struct nand_chip *chip = mtd_to_nand(mtd);
  1859. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1860. if (section)
  1861. return -ERANGE;
  1862. oobregion->offset = cdns_chip->avail_oob_size;
  1863. oobregion->length = chip->ecc.total;
  1864. return 0;
  1865. }
  1866. static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
  1867. .free = cadence_nand_ooblayout_free,
  1868. .ecc = cadence_nand_ooblayout_ecc,
  1869. };
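/*
 * Convert a timing expressed in picoseconds into a clock-cycle count minus
 * one; callers turn the result back into a duration as (cnt + 1) * clock.
 * Illustrative values: with clock = 20000 ps, timing = 25000 ps gives 1
 * (i.e. two cycles, 40000 ps), and an exact timing = 40000 ps also gives 1.
 */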
  1870. static int calc_cycl(u32 timing, u32 clock)
  1871. {
  1872. if (timing == 0 || clock == 0)
  1873. return 0;
  1874. if ((timing % clock) > 0)
  1875. return timing / clock;
  1876. else
  1877. return timing / clock - 1;
  1878. }
  1879. /* Calculate max data valid window. */
  1880. static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
  1881. u32 board_delay_skew_min, u32 ext_mode)
  1882. {
  1883. if (ext_mode == 0)
  1884. clk_period /= 2;
  1885. return (trp_cnt + 1) * clk_period + trhoh_min +
  1886. board_delay_skew_min;
  1887. }
  1888. /* Calculate data valid window. */
  1889. static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
  1890. u32 trea_max, u32 ext_mode)
  1891. {
  1892. if (ext_mode == 0)
  1893. clk_period /= 2;
  1894. return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
  1895. }
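/*
 * Translate the requested SDR interface timings into controller register
 * values: async RE/WE pulse widths, command/data turnaround delays and, for
 * DLL-based PHYs, the DQS sampling point. Illustrative data-valid-window
 * check with ext_mode = 1, trp_cnt = 1, clk_period = 20000 ps,
 * tRHOH_min = 15000 ps and tREA_max = 25000 ps:
 * tdvw = 2 * 20000 + 15000 - 25000 = 30000 ps.
 */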
  1896. static int
  1897. cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
  1898. const struct nand_interface_config *conf)
  1899. {
  1900. const struct nand_sdr_timings *sdr;
  1901. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  1902. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  1903. struct cadence_nand_timings *t = &cdns_chip->timings;
  1904. u32 reg;
  1905. u32 board_delay = cdns_ctrl->board_delay;
  1906. u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
  1907. cdns_ctrl->nf_clk_rate);
  1908. u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
  1909. u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
  1910. u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
  1911. u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
  1912. u32 if_skew = cdns_ctrl->caps1->if_skew;
  1913. u32 board_delay_skew_min = board_delay - if_skew;
  1914. u32 board_delay_skew_max = board_delay + if_skew;
  1915. u32 dqs_sampl_res, phony_dqs_mod;
  1916. u32 tdvw, tdvw_min, tdvw_max;
  1917. u32 ext_rd_mode, ext_wr_mode;
  1918. u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
  1919. u32 sampling_point;
  1920. sdr = nand_get_sdr_timings(conf);
  1921. if (IS_ERR(sdr))
  1922. return PTR_ERR(sdr);
  1923. memset(t, 0, sizeof(*t));
  1924. /* Sampling point calculation. */
  1925. if (cdns_ctrl->caps2.is_phy_type_dll)
  1926. phony_dqs_mod = 2;
  1927. else
  1928. phony_dqs_mod = 1;
  1929. dqs_sampl_res = clk_period / phony_dqs_mod;
  1930. tdvw_min = sdr->tREA_max + board_delay_skew_max;
	/*
	 * The idea of these calculations is to get the optimum values
	 * for the tRP and tRH timings. If it is NOT possible to sample data
	 * with the optimal tRP/tRH settings, the parameters are extended.
	 * If clk_period is 50ns (the lowest value), this condition is met
	 * for SDR timing modes 1, 2, 3, 4 and 5.
	 * If clk_period is 20ns, the condition is met only for SDR timing
	 * mode 5.
	 */
  1940. if (sdr->tRC_min <= clk_period &&
  1941. sdr->tRP_min <= (clk_period / 2) &&
  1942. sdr->tREH_min <= (clk_period / 2)) {
  1943. /* Performance mode. */
  1944. ext_rd_mode = 0;
  1945. tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
  1946. sdr->tREA_max, ext_rd_mode);
  1947. tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
  1948. board_delay_skew_min,
  1949. ext_rd_mode);
		/*
		 * Check if the data valid window and sampling point can be
		 * found and are not on the edge (i.e. we have hold margin).
		 * If not, extend the tRP timing.
		 */
		if (tdvw > 0) {
			if (tdvw_max <= tdvw_min ||
			    (tdvw_max % dqs_sampl_res) == 0) {
				/*
				 * No valid sampling point, so the RE pulse
				 * needs to be widened by half a clock cycle.
				 */
				ext_rd_mode = 1;
			}
		} else {
			/*
			 * There is no valid window in which to sample data,
			 * so tRP needs to be widened.
			 * Very safe calculations are performed here.
			 */
			trp_cnt = (sdr->tREA_max + board_delay_skew_max
				   + dqs_sampl_res) / clk_period;
			ext_rd_mode = 1;
		}
  1974. } else {
  1975. /* Extended read mode. */
  1976. u32 trh;
  1977. ext_rd_mode = 1;
  1978. trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
  1979. trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
  1980. if (sdr->tREH_min >= trh)
  1981. trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
  1982. else
  1983. trh_cnt = calc_cycl(trh, clk_period);
  1984. tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
  1985. sdr->tREA_max, ext_rd_mode);
		/*
		 * Check if the data valid window and sampling point can be
		 * found, or, if the window is at the edge, check if the
		 * previous sampling point is valid. If not, extend the tRP
		 * timing.
		 */
		if (tdvw > 0) {
			tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
						 sdr->tRHOH_min,
						 board_delay_skew_min,
						 ext_rd_mode);

			if ((((tdvw_max / dqs_sampl_res)
			      * dqs_sampl_res) <= tdvw_min) ||
			    (((tdvw_max % dqs_sampl_res) == 0) &&
			     (((tdvw_max / dqs_sampl_res - 1)
			       * dqs_sampl_res) <= tdvw_min))) {
				/*
				 * The data valid window is narrower than the
				 * sampling resolution and does not hit any
				 * sampling point. To make sure a sampling
				 * point is found, extend the RE low pulse
				 * width by one clock cycle.
				 */
				trp_cnt = trp_cnt + 1;
			}
		} else {
			/*
			 * There is no valid window in which to sample data,
			 * so tRP needs to be widened.
			 * Very safe calculations are performed here.
			 */
			trp_cnt = (sdr->tREA_max + board_delay_skew_max
				   + dqs_sampl_res) / clk_period;
		}
	}
  2020. tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
  2021. sdr->tRHOH_min,
  2022. board_delay_skew_min, ext_rd_mode);
  2023. if (sdr->tWC_min <= clk_period &&
  2024. (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
  2025. (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
  2026. ext_wr_mode = 0;
  2027. } else {
  2028. u32 twh;
  2029. ext_wr_mode = 1;
  2030. twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
  2031. if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
  2032. twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
  2033. clk_period);
  2034. twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
  2035. if (sdr->tWH_min >= twh)
  2036. twh = sdr->tWH_min;
  2037. twh_cnt = calc_cycl(twh + if_skew, clk_period);
  2038. }
  2039. reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
  2040. reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
  2041. reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
  2042. reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
  2043. t->async_toggle_timings = reg;
  2044. dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
  2045. tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
  2046. tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
  2047. twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
  2048. trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
  2049. reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
	/*
	 * If a timing exceeds the delay field in the timing register,
	 * use the maximum value.
	 */
  2054. if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
  2055. reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
  2056. else
  2057. reg |= TIMINGS0_TCCS;
  2058. reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
  2059. reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
  2060. t->timings0 = reg;
  2061. dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
	/* The following is related to a single signal, so skew is not needed. */
  2063. trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
  2064. trhz_cnt = trhz_cnt + 1;
  2065. twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
	/*
	 * Because of the two-stage syncflop the value must be increased:
	 * the first value (3) is related to synchronization, the second
	 * value (5) to the output interface delay.
	 */
	twb_cnt = twb_cnt + 3 + 5;
	/*
	 * The following is related to the WE edge of the random data input
	 * sequence, so skew is not needed.
	 */
  2076. tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
  2077. reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
  2078. reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
  2079. reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
  2080. t->timings1 = reg;
  2081. dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
  2082. tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
  2083. if (tfeat_cnt < twb_cnt)
  2084. tfeat_cnt = twb_cnt;
  2085. tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
  2086. tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
  2087. reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
  2088. reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
  2089. reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
  2090. t->timings2 = reg;
  2091. dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
  2092. if (cdns_ctrl->caps2.is_phy_type_dll) {
  2093. reg = DLL_PHY_CTRL_DLL_RST_N;
  2094. if (ext_wr_mode)
  2095. reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
  2096. if (ext_rd_mode)
  2097. reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
  2098. reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
  2099. reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
  2100. t->dll_phy_ctrl = reg;
  2101. dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
  2102. }
  2103. /* Sampling point calculation. */
  2104. if ((tdvw_max % dqs_sampl_res) > 0)
  2105. sampling_point = tdvw_max / dqs_sampl_res;
  2106. else
  2107. sampling_point = (tdvw_max / dqs_sampl_res - 1);
  2108. if (sampling_point * dqs_sampl_res > tdvw_min) {
  2109. dll_phy_dqs_timing =
  2110. FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
  2111. dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
  2112. phony_dqs_timing = sampling_point / phony_dqs_mod;
  2113. if ((sampling_point % 2) > 0) {
  2114. dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
  2115. if ((tdvw_max % dqs_sampl_res) == 0)
				/*
				 * Calculation for a sampling point that lies
				 * at the edge of the data window and is an
				 * odd number.
				 */
				phony_dqs_timing = (tdvw_max / dqs_sampl_res)
						   / phony_dqs_mod - 1;
  2122. if (!cdns_ctrl->caps2.is_phy_type_dll)
  2123. phony_dqs_timing--;
  2124. } else {
  2125. phony_dqs_timing--;
  2126. }
  2127. rd_del_sel = phony_dqs_timing + 3;
  2128. } else {
  2129. dev_warn(cdns_ctrl->dev,
  2130. "ERROR : cannot find valid sampling point\n");
  2131. }
  2132. reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
  2133. if (cdns_ctrl->caps2.is_phy_type_dll)
  2134. reg |= PHY_CTRL_SDR_DQS;
  2135. t->phy_ctrl = reg;
  2136. dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
  2137. if (cdns_ctrl->caps2.is_phy_type_dll) {
  2138. dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
  2139. dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
  2140. dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
  2141. dll_phy_dqs_timing);
  2142. t->phy_dqs_timing = dll_phy_dqs_timing;
  2143. reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
  2144. dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
  2145. reg);
  2146. t->phy_gate_lpbk_ctrl = reg;
  2147. dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
  2148. PHY_DLL_MASTER_CTRL_BYPASS_MODE);
  2149. dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
  2150. }
  2151. return 0;
  2152. }
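/*
 * Late chip configuration: choose the ECC step/strength from the controller
 * capabilities, derive the sector layout and available OOB size, program the
 * ECC engine and install the page/OOB accessors and OOB layout.
 */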
  2153. static int cadence_nand_attach_chip(struct nand_chip *chip)
  2154. {
  2155. struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
  2156. struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
  2157. u32 ecc_size;
  2158. struct mtd_info *mtd = nand_to_mtd(chip);
  2159. int ret;
  2160. if (chip->options & NAND_BUSWIDTH_16) {
  2161. ret = cadence_nand_set_access_width16(cdns_ctrl, true);
  2162. if (ret)
  2163. return ret;
  2164. }
  2165. chip->bbt_options |= NAND_BBT_USE_FLASH;
  2166. chip->bbt_options |= NAND_BBT_NO_OOB;
  2167. chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
  2168. chip->options |= NAND_NO_SUBPAGE_WRITE;
  2169. cdns_chip->bbm_offs = chip->badblockpos;
  2170. cdns_chip->bbm_offs &= ~0x01;
	/* This value should be an even number. */
  2172. cdns_chip->bbm_len = 2;
  2173. ret = nand_ecc_choose_conf(chip,
  2174. &cdns_ctrl->ecc_caps,
  2175. mtd->oobsize - cdns_chip->bbm_len);
  2176. if (ret) {
  2177. dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
  2178. return ret;
  2179. }
  2180. dev_dbg(cdns_ctrl->dev,
  2181. "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
  2182. chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
  2183. /* Error correction configuration. */
  2184. cdns_chip->sector_size = chip->ecc.size;
  2185. cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
  2186. ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
  2187. cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
  2188. if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
  2189. cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
  2190. if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
  2191. > mtd->oobsize)
  2192. cdns_chip->avail_oob_size -= 4;
  2193. ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
  2194. if (ret < 0)
  2195. return -EINVAL;
  2196. cdns_chip->corr_str_idx = (u8)ret;
  2197. if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
  2198. 1000000,
  2199. CTRL_STATUS_CTRL_BUSY, true))
  2200. return -ETIMEDOUT;
  2201. cadence_nand_set_ecc_strength(cdns_ctrl,
  2202. cdns_chip->corr_str_idx);
  2203. cadence_nand_set_erase_detection(cdns_ctrl, true,
  2204. chip->ecc.strength);
  2205. /* Override the default read operations. */
  2206. chip->ecc.read_page = cadence_nand_read_page;
  2207. chip->ecc.read_page_raw = cadence_nand_read_page_raw;
  2208. chip->ecc.write_page = cadence_nand_write_page;
  2209. chip->ecc.write_page_raw = cadence_nand_write_page_raw;
  2210. chip->ecc.read_oob = cadence_nand_read_oob;
  2211. chip->ecc.write_oob = cadence_nand_write_oob;
  2212. chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
  2213. chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
  2214. if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
  2215. cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
  2216. /* Is 32-bit DMA supported? */
  2217. ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
  2218. if (ret) {
  2219. dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
  2220. return ret;
  2221. }
  2222. mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
  2223. return 0;
  2224. }
  2225. static const struct nand_controller_ops cadence_nand_controller_ops = {
  2226. .attach_chip = cadence_nand_attach_chip,
  2227. .exec_op = cadence_nand_exec_op,
  2228. .setup_interface = cadence_nand_setup_interface,
  2229. };
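/*
 * Instantiate one NAND chip from a device-tree child node: validate and
 * reserve its chip-select lines, run nand_scan() and register the MTD device.
 */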
  2230. static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
  2231. struct device_node *np)
  2232. {
  2233. struct cdns_nand_chip *cdns_chip;
  2234. struct mtd_info *mtd;
  2235. struct nand_chip *chip;
  2236. int nsels, ret, i;
  2237. u32 cs;
  2238. nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
  2239. if (nsels <= 0) {
  2240. dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
  2241. return -EINVAL;
  2242. }
  2243. /* Allocate the nand chip structure. */
  2244. cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
  2245. (nsels * sizeof(u8)),
  2246. GFP_KERNEL);
  2247. if (!cdns_chip) {
  2248. dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
  2249. return -ENOMEM;
  2250. }
  2251. cdns_chip->nsels = nsels;
  2252. for (i = 0; i < nsels; i++) {
  2253. /* Retrieve CS id. */
  2254. ret = of_property_read_u32_index(np, "reg", i, &cs);
  2255. if (ret) {
  2256. dev_err(cdns_ctrl->dev,
  2257. "could not retrieve reg property: %d\n",
  2258. ret);
  2259. return ret;
  2260. }
  2261. if (cs >= cdns_ctrl->caps2.max_banks) {
  2262. dev_err(cdns_ctrl->dev,
  2263. "invalid reg value: %u (max CS = %d)\n",
  2264. cs, cdns_ctrl->caps2.max_banks);
  2265. return -EINVAL;
  2266. }
  2267. if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
  2268. dev_err(cdns_ctrl->dev,
  2269. "CS %d already assigned\n", cs);
  2270. return -EINVAL;
  2271. }
  2272. cdns_chip->cs[i] = cs;
  2273. }
  2274. chip = &cdns_chip->chip;
  2275. chip->controller = &cdns_ctrl->controller;
  2276. nand_set_flash_node(chip, np);
  2277. mtd = nand_to_mtd(chip);
  2278. mtd->dev.parent = cdns_ctrl->dev;
  2279. /*
  2280. * Default to HW ECC engine mode. If the nand-ecc-mode property is given
  2281. * in the DT node, this entry will be overwritten in nand_scan_ident().
  2282. */
  2283. chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
  2284. ret = nand_scan(chip, cdns_chip->nsels);
  2285. if (ret) {
  2286. dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
  2287. return ret;
  2288. }
  2289. ret = mtd_device_register(mtd, NULL, 0);
  2290. if (ret) {
  2291. dev_err(cdns_ctrl->dev,
  2292. "failed to register mtd device: %d\n", ret);
  2293. nand_cleanup(chip);
  2294. return ret;
  2295. }
  2296. list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
  2297. return 0;
  2298. }
  2299. static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
  2300. {
  2301. struct cdns_nand_chip *entry, *temp;
  2302. struct nand_chip *chip;
  2303. int ret;
  2304. list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
  2305. chip = &entry->chip;
  2306. ret = mtd_device_unregister(nand_to_mtd(chip));
  2307. WARN_ON(ret);
  2308. nand_cleanup(chip);
  2309. list_del(&entry->node);
  2310. }
  2311. }
  2312. static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
  2313. {
  2314. struct device_node *np = cdns_ctrl->dev->of_node;
  2315. struct device_node *nand_np;
  2316. int max_cs = cdns_ctrl->caps2.max_banks;
  2317. int nchips, ret;
  2318. nchips = of_get_child_count(np);
  2319. if (nchips > max_cs) {
  2320. dev_err(cdns_ctrl->dev,
  2321. "too many NAND chips: %d (max = %d CS)\n",
  2322. nchips, max_cs);
  2323. return -EINVAL;
  2324. }
  2325. for_each_child_of_node(np, nand_np) {
  2326. ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
  2327. if (ret) {
  2328. of_node_put(nand_np);
  2329. cadence_nand_chips_cleanup(cdns_ctrl);
  2330. return ret;
  2331. }
  2332. }
  2333. return 0;
  2334. }
  2335. static void
  2336. cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
  2337. {
  2338. /* Disable interrupts. */
  2339. writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
  2340. }
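/*
 * One-time controller setup: allocate the CDMA descriptor and bounce buffer,
 * request the interrupt, initialize the hardware, optionally grab a memcpy
 * DMA channel and register all NAND chips described in the device tree.
 * The bounce buffer is reallocated at the end to fit the largest page found.
 */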
  2341. static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
  2342. {
  2343. dma_cap_mask_t mask;
  2344. int ret;
	cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
						  sizeof(*cdns_ctrl->cdma_desc),
						  &cdns_ctrl->dma_cdma_desc,
						  GFP_KERNEL);
	/* dma_alloc_coherent() returns NULL on failure; check the virtual address. */
	if (!cdns_ctrl->cdma_desc)
		return -ENOMEM;
  2351. cdns_ctrl->buf_size = SZ_16K;
  2352. cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
  2353. if (!cdns_ctrl->buf) {
  2354. ret = -ENOMEM;
  2355. goto free_buf_desc;
  2356. }
  2357. if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
  2358. IRQF_SHARED, "cadence-nand-controller",
  2359. cdns_ctrl)) {
  2360. dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
  2361. ret = -ENODEV;
  2362. goto free_buf;
  2363. }
  2364. spin_lock_init(&cdns_ctrl->irq_lock);
  2365. init_completion(&cdns_ctrl->complete);
  2366. ret = cadence_nand_hw_init(cdns_ctrl);
  2367. if (ret)
  2368. goto disable_irq;
  2369. dma_cap_zero(mask);
  2370. dma_cap_set(DMA_MEMCPY, mask);
  2371. if (cdns_ctrl->caps1->has_dma) {
  2372. cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
  2373. if (!cdns_ctrl->dmac) {
  2374. dev_err(cdns_ctrl->dev,
  2375. "Unable to get a DMA channel\n");
  2376. ret = -EBUSY;
  2377. goto disable_irq;
  2378. }
  2379. }
  2380. nand_controller_init(&cdns_ctrl->controller);
  2381. INIT_LIST_HEAD(&cdns_ctrl->chips);
  2382. cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
  2383. cdns_ctrl->curr_corr_str_idx = 0xFF;
  2384. ret = cadence_nand_chips_init(cdns_ctrl);
  2385. if (ret) {
  2386. dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
  2387. ret);
  2388. goto dma_release_chnl;
  2389. }
  2390. kfree(cdns_ctrl->buf);
  2391. cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
  2392. if (!cdns_ctrl->buf) {
  2393. ret = -ENOMEM;
  2394. goto dma_release_chnl;
  2395. }
  2396. return 0;
  2397. dma_release_chnl:
  2398. if (cdns_ctrl->dmac)
  2399. dma_release_channel(cdns_ctrl->dmac);
  2400. disable_irq:
  2401. cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
  2402. free_buf:
  2403. kfree(cdns_ctrl->buf);
  2404. free_buf_desc:
  2405. dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
  2406. cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
  2407. return ret;
  2408. }
  2409. /* Driver exit point. */
  2410. static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
  2411. {
  2412. cadence_nand_chips_cleanup(cdns_ctrl);
  2413. cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
  2414. kfree(cdns_ctrl->buf);
  2415. dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
  2416. cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
  2417. if (cdns_ctrl->dmac)
  2418. dma_release_channel(cdns_ctrl->dmac);
  2419. }
  2420. struct cadence_nand_dt {
  2421. struct cdns_nand_ctrl cdns_ctrl;
  2422. struct clk *clk;
  2423. };
  2424. static const struct cadence_nand_dt_devdata cadence_nand_default = {
  2425. .if_skew = 0,
  2426. .has_dma = 1,
  2427. };
  2428. static const struct of_device_id cadence_nand_dt_ids[] = {
  2429. {
  2430. .compatible = "cdns,hp-nfc",
  2431. .data = &cadence_nand_default
  2432. }, {}
  2433. };
  2434. MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
  2435. static int cadence_nand_dt_probe(struct platform_device *ofdev)
  2436. {
  2437. struct resource *res;
  2438. struct cadence_nand_dt *dt;
  2439. struct cdns_nand_ctrl *cdns_ctrl;
  2440. int ret;
  2441. const struct of_device_id *of_id;
  2442. const struct cadence_nand_dt_devdata *devdata;
  2443. u32 val;
  2444. of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
  2445. if (of_id) {
  2446. ofdev->id_entry = of_id->data;
  2447. devdata = of_id->data;
  2448. } else {
  2449. pr_err("Failed to find the right device id.\n");
  2450. return -ENOMEM;
  2451. }
  2452. dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
  2453. if (!dt)
  2454. return -ENOMEM;
  2455. cdns_ctrl = &dt->cdns_ctrl;
  2456. cdns_ctrl->caps1 = devdata;
  2457. cdns_ctrl->dev = &ofdev->dev;
  2458. cdns_ctrl->irq = platform_get_irq(ofdev, 0);
  2459. if (cdns_ctrl->irq < 0)
  2460. return cdns_ctrl->irq;
  2461. dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
  2462. cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
  2463. if (IS_ERR(cdns_ctrl->reg))
  2464. return PTR_ERR(cdns_ctrl->reg);
  2465. cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
  2466. if (IS_ERR(cdns_ctrl->io.virt))
  2467. return PTR_ERR(cdns_ctrl->io.virt);
  2468. cdns_ctrl->io.dma = res->start;
  2469. dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
  2470. if (IS_ERR(dt->clk))
  2471. return PTR_ERR(dt->clk);
  2472. cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
  2473. ret = of_property_read_u32(ofdev->dev.of_node,
  2474. "cdns,board-delay-ps", &val);
  2475. if (ret) {
  2476. val = 4830;
  2477. dev_info(cdns_ctrl->dev,
  2478. "missing cdns,board-delay-ps property, %d was set\n",
  2479. val);
  2480. }
  2481. cdns_ctrl->board_delay = val;
  2482. ret = cadence_nand_init(cdns_ctrl);
  2483. if (ret)
  2484. return ret;
  2485. platform_set_drvdata(ofdev, dt);
  2486. return 0;
  2487. }
  2488. static int cadence_nand_dt_remove(struct platform_device *ofdev)
  2489. {
  2490. struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
  2491. cadence_nand_remove(&dt->cdns_ctrl);
  2492. return 0;
  2493. }
  2494. static struct platform_driver cadence_nand_dt_driver = {
  2495. .probe = cadence_nand_dt_probe,
  2496. .remove = cadence_nand_dt_remove,
  2497. .driver = {
  2498. .name = "cadence-nand-controller",
  2499. .of_match_table = cadence_nand_dt_ids,
  2500. },
  2501. };
  2502. module_platform_driver(cadence_nand_dt_driver);
  2503. MODULE_AUTHOR("Piotr Sroka <[email protected]>");
  2504. MODULE_LICENSE("GPL v2");
  2505. MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");