sunxi_nand.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013 Boris BREZILLON <[email protected]>
 *
 * Derived from:
 * https://github.com/yuq/sunxi-nfc-mtd
 * Copyright (C) 2013 Qiang Yu <[email protected]>
 *
 * https://github.com/hno/Allwinner-Info
 * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
 *
 * Copyright (C) 2013 Dmitriy B. <[email protected]>
 * Copyright (C) 2013 Sergey Lapin <[email protected]>
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/reset.h>

#define NFC_REG_CTL 0x0000
#define NFC_REG_ST 0x0004
#define NFC_REG_INT 0x0008
#define NFC_REG_TIMING_CTL 0x000C
#define NFC_REG_TIMING_CFG 0x0010
#define NFC_REG_ADDR_LOW 0x0014
#define NFC_REG_ADDR_HIGH 0x0018
#define NFC_REG_SECTOR_NUM 0x001C
#define NFC_REG_CNT 0x0020
#define NFC_REG_CMD 0x0024
#define NFC_REG_RCMD_SET 0x0028
#define NFC_REG_WCMD_SET 0x002C
#define NFC_REG_A10_IO_DATA 0x0030
#define NFC_REG_A23_IO_DATA 0x0300
#define NFC_REG_ECC_CTL 0x0034
#define NFC_REG_ECC_ST 0x0038
#define NFC_REG_DEBUG 0x003C
#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
#define NFC_REG_PAT_ID 0x00A4
#define NFC_REG_MDMA_ADDR 0x00C0
#define NFC_REG_MDMA_CNT 0x00C4
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800

/* define bit use in NFC_CTL */
#define NFC_EN BIT(0)
#define NFC_RESET BIT(1)
#define NFC_BUS_WIDTH_MSK BIT(2)
#define NFC_BUS_WIDTH_8 (0 << 2)
#define NFC_BUS_WIDTH_16 (1 << 2)
#define NFC_RB_SEL_MSK BIT(3)
#define NFC_RB_SEL(x) ((x) << 3)
#define NFC_CE_SEL_MSK GENMASK(26, 24)
#define NFC_CE_SEL(x) ((x) << 24)
#define NFC_CE_CTL BIT(6)
#define NFC_PAGE_SHIFT_MSK GENMASK(11, 8)
#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM BIT(12)
#define NFC_RAM_METHOD BIT(14)
#define NFC_DMA_TYPE_NORMAL BIT(15)
#define NFC_DEBUG_CTL BIT(31)

/* define bit use in NFC_ST */
#define NFC_RB_B2R BIT(0)
#define NFC_CMD_INT_FLAG BIT(1)
#define NFC_DMA_INT_FLAG BIT(2)
#define NFC_CMD_FIFO_STATUS BIT(3)
#define NFC_STA BIT(4)
#define NFC_NATCH_INT_FLAG BIT(5)
#define NFC_RB_STATE(x) BIT(x + 8)

/* define bit use in NFC_INT */
#define NFC_B2R_INT_ENABLE BIT(0)
#define NFC_CMD_INT_ENABLE BIT(1)
#define NFC_DMA_INT_ENABLE BIT(2)
#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
                      NFC_CMD_INT_ENABLE | \
                      NFC_DMA_INT_ENABLE)

/* define bit use in NFC_TIMING_CTL */
#define NFC_TIMING_CTL_EDO BIT(8)

/* define NFC_TIMING_CFG register layout */
#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
        (((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
         (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
         (((tCAD) & 0x7) << 8))

/* define bit use in NFC_CMD */
#define NFC_CMD_LOW_BYTE_MSK GENMASK(7, 0)
#define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8)
#define NFC_CMD(x) (x)
#define NFC_ADR_NUM_MSK GENMASK(18, 16)
#define NFC_ADR_NUM(x) (((x) - 1) << 16)
#define NFC_SEND_ADR BIT(19)
#define NFC_ACCESS_DIR BIT(20)
#define NFC_DATA_TRANS BIT(21)
#define NFC_SEND_CMD1 BIT(22)
#define NFC_WAIT_FLAG BIT(23)
#define NFC_SEND_CMD2 BIT(24)
#define NFC_SEQ BIT(25)
#define NFC_DATA_SWAP_METHOD BIT(26)
#define NFC_ROW_AUTO_INC BIT(27)
#define NFC_SEND_CMD3 BIT(28)
#define NFC_SEND_CMD4 BIT(29)
#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
#define NFC_NORMAL_OP (0 << 30)
#define NFC_ECC_OP (1 << 30)
#define NFC_PAGE_OP (2U << 30)

/* define bit use in NFC_RCMD_SET */
#define NFC_READ_CMD_MSK GENMASK(7, 0)
#define NFC_RND_READ_CMD0_MSK GENMASK(15, 8)
#define NFC_RND_READ_CMD1_MSK GENMASK(23, 16)

/* define bit use in NFC_WCMD_SET */
#define NFC_PROGRAM_CMD_MSK GENMASK(7, 0)
#define NFC_RND_WRITE_CMD_MSK GENMASK(15, 8)
#define NFC_READ_CMD0_MSK GENMASK(23, 16)
#define NFC_READ_CMD1_MSK GENMASK(31, 24)

/* define bit use in NFC_ECC_CTL */
#define NFC_ECC_EN BIT(0)
#define NFC_ECC_PIPELINE BIT(3)
#define NFC_ECC_EXCEPTION BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
#define NFC_ECC_BLOCK_512 BIT(5)
#define NFC_RANDOM_EN BIT(9)
#define NFC_RANDOM_DIRECTION BIT(10)
#define NFC_ECC_MODE_MSK GENMASK(15, 12)
#define NFC_ECC_MODE(x) ((x) << 12)
#define NFC_RANDOM_SEED_MSK GENMASK(30, 16)
#define NFC_RANDOM_SEED(x) ((x) << 16)

/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x) BIT(x)
#define NFC_ECC_ERR_MSK GENMASK(15, 0)
#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)

#define NFC_DEFAULT_TIMEOUT_MS 1000

#define NFC_SRAM_SIZE 1024

#define NFC_MAX_CS 7
/**
 * struct sunxi_nand_chip_sel - stores information related to NAND Chip Select
 *
 * @cs: the NAND CS id used to communicate with a NAND Chip
 * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC
 */
struct sunxi_nand_chip_sel {
        u8 cs;
        s8 rb;
};

/**
 * struct sunxi_nand_hw_ecc - stores information related to HW ECC support
 *
 * @mode: the sunxi ECC mode field deduced from ECC requirements
 */
struct sunxi_nand_hw_ecc {
        int mode;
};

/**
 * struct sunxi_nand_chip - stores NAND chip device related information
 *
 * @node: used to store NAND chips into a list
 * @nand: base NAND chip structure
 * @ecc: ECC controller structure
 * @clk_rate: clk_rate required for this NAND chip
 * @timing_cfg: TIMING_CFG register value for this NAND chip
 * @timing_ctl: TIMING_CTL register value for this NAND chip
 * @nsels: number of CS lines required by the NAND chip
 * @sels: array of CS lines descriptions
 */
struct sunxi_nand_chip {
        struct list_head node;
        struct nand_chip nand;
        struct sunxi_nand_hw_ecc *ecc;
        unsigned long clk_rate;
        u32 timing_cfg;
        u32 timing_ctl;
        int nsels;
        struct sunxi_nand_chip_sel sels[];
};

static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
{
        return container_of(nand, struct sunxi_nand_chip, nand);
}

/*
 * NAND Controller capabilities structure: stores NAND controller capabilities
 * for distinction between compatible strings.
 *
 * @has_mdma: Use mbus dma mode, otherwise general dma
 *            through MBUS on A23/A33 needs extra configuration.
 * @reg_io_data: I/O data register
 * @dma_maxburst: DMA maxburst
 */
struct sunxi_nfc_caps {
        bool has_mdma;
        unsigned int reg_io_data;
        unsigned int dma_maxburst;
};

/**
 * struct sunxi_nfc - stores sunxi NAND controller information
 *
 * @controller: base controller structure
 * @dev: parent device (used to print error messages)
 * @regs: NAND controller registers
 * @ahb_clk: NAND controller AHB clock
 * @mod_clk: NAND controller mod clock
 * @reset: NAND controller reset line
 * @assigned_cs: bitmask describing already assigned CS lines
 * @clk_rate: NAND controller current clock rate
 * @chips: a list containing all the NAND chips attached to this NAND
 *         controller
 * @complete: a completion object used to wait for NAND controller events
 * @dmac: the DMA channel attached to the NAND controller
 * @caps: NAND Controller capabilities
 */
struct sunxi_nfc {
        struct nand_controller controller;
        struct device *dev;
        void __iomem *regs;
        struct clk *ahb_clk;
        struct clk *mod_clk;
        struct reset_control *reset;
        unsigned long assigned_cs;
        unsigned long clk_rate;
        struct list_head chips;
        struct completion complete;
        struct dma_chan *dmac;
        const struct sunxi_nfc_caps *caps;
};

static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
{
        return container_of(ctrl, struct sunxi_nfc, controller);
}
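
/*
 * Interrupt handler: acknowledge the status bits that fired and mask the
 * interrupt sources that have been served. The completion is only signalled
 * once every event enabled in NFC_REG_INT has been observed.
 */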
static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
{
        struct sunxi_nfc *nfc = dev_id;
        u32 st = readl(nfc->regs + NFC_REG_ST);
        u32 ien = readl(nfc->regs + NFC_REG_INT);

        if (!(ien & st))
                return IRQ_NONE;

        if ((ien & st) == ien)
                complete(&nfc->complete);

        writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
        writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);

        return IRQ_HANDLED;
}

static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
                                 bool use_polling, unsigned int timeout_ms)
{
        int ret;

        if (events & ~NFC_INT_MASK)
                return -EINVAL;

        if (!timeout_ms)
                timeout_ms = NFC_DEFAULT_TIMEOUT_MS;

        if (!use_polling) {
                init_completion(&nfc->complete);

                writel(events, nfc->regs + NFC_REG_INT);

                ret = wait_for_completion_timeout(&nfc->complete,
                                                  msecs_to_jiffies(timeout_ms));
                if (!ret)
                        ret = -ETIMEDOUT;
                else
                        ret = 0;

                writel(0, nfc->regs + NFC_REG_INT);
        } else {
                u32 status;

                ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
                                         (status & events) == events, 1,
                                         timeout_ms * 1000);
        }

        writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);

        if (ret)
                dev_err(nfc->dev, "wait interrupt timedout\n");

        return ret;
}

static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
{
        u32 status;
        int ret;

        ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
                                 !(status & NFC_CMD_FIFO_STATUS), 1,
                                 NFC_DEFAULT_TIMEOUT_MS * 1000);
        if (ret)
                dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");

        return ret;
}

static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
{
        u32 ctl;
        int ret;

        writel(0, nfc->regs + NFC_REG_ECC_CTL);
        writel(NFC_RESET, nfc->regs + NFC_REG_CTL);

        ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
                                 !(ctl & NFC_RESET), 1,
                                 NFC_DEFAULT_TIMEOUT_MS * 1000);
        if (ret)
                dev_err(nfc->dev, "wait for NAND controller reset timedout\n");

        return ret;
}
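
/*
 * Prepare a DMA transfer of nchunks * chunksize bytes to/from the controller:
 * map the buffer, switch the controller to DMA (RAM_METHOD) mode and program
 * the sector count and chunk size. Controllers with an internal MBUS DMA
 * (has_mdma) are given the buffer address directly; others go through an
 * external dmaengine channel.
 */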
static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
                                    int chunksize, int nchunks,
                                    enum dma_data_direction ddir,
                                    struct scatterlist *sg)
{
        struct dma_async_tx_descriptor *dmad;
        enum dma_transfer_direction tdir;
        dma_cookie_t dmat;
        int ret;

        if (ddir == DMA_FROM_DEVICE)
                tdir = DMA_DEV_TO_MEM;
        else
                tdir = DMA_MEM_TO_DEV;

        sg_init_one(sg, buf, nchunks * chunksize);
        ret = dma_map_sg(nfc->dev, sg, 1, ddir);
        if (!ret)
                return -ENOMEM;

        if (!nfc->caps->has_mdma) {
                dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
                if (!dmad) {
                        ret = -EINVAL;
                        goto err_unmap_buf;
                }
        }

        writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
               nfc->regs + NFC_REG_CTL);

        writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
        writel(chunksize, nfc->regs + NFC_REG_CNT);

        if (nfc->caps->has_mdma) {
                writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_DMA_TYPE_NORMAL,
                       nfc->regs + NFC_REG_CTL);
                writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
                writel(sg_dma_address(sg), nfc->regs + NFC_REG_MDMA_ADDR);
        } else {
                dmat = dmaengine_submit(dmad);

                ret = dma_submit_error(dmat);
                if (ret)
                        goto err_clr_dma_flag;
        }

        return 0;

err_clr_dma_flag:
        writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
               nfc->regs + NFC_REG_CTL);

err_unmap_buf:
        dma_unmap_sg(nfc->dev, sg, 1, ddir);
        return ret;
}

static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc,
                                     enum dma_data_direction ddir,
                                     struct scatterlist *sg)
{
        dma_unmap_sg(nfc->dev, sg, 1, ddir);
        writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
               nfc->regs + NFC_REG_CTL);
}
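
/*
 * Apply the per-chip configuration when a CS is selected: CE and R/B line
 * routing, page shift, spare area offset, and the chip-specific clock rate
 * and timing register values.
 */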
static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        struct sunxi_nand_chip_sel *sel;
        u32 ctl;

        if (cs > 0 && cs >= sunxi_nand->nsels)
                return;

        ctl = readl(nfc->regs + NFC_REG_CTL) &
              ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);

        sel = &sunxi_nand->sels[cs];
        ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift);
        if (sel->rb >= 0)
                ctl |= NFC_RB_SEL(sel->rb);

        writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);

        if (nfc->clk_rate != sunxi_nand->clk_rate) {
                clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
                nfc->clk_rate = sunxi_nand->clk_rate;
        }

        writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
        writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
        writel(ctl, nfc->regs + NFC_REG_CTL);
}

static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
{
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        int ret;
        int cnt;
        int offs = 0;
        u32 tmp;

        while (len > offs) {
                bool poll = false;

                cnt = min(len - offs, NFC_SRAM_SIZE);

                ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
                if (ret)
                        break;

                writel(cnt, nfc->regs + NFC_REG_CNT);
                tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
                writel(tmp, nfc->regs + NFC_REG_CMD);

                /* Arbitrary limit for polling mode */
                if (cnt < 64)
                        poll = true;

                ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
                if (ret)
                        break;

                if (buf)
                        memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
                                      cnt);
                offs += cnt;
        }
}

static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
                                int len)
{
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        int ret;
        int cnt;
        int offs = 0;
        u32 tmp;

        while (len > offs) {
                bool poll = false;

                cnt = min(len - offs, NFC_SRAM_SIZE);

                ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
                if (ret)
                        break;

                writel(cnt, nfc->regs + NFC_REG_CNT);
                memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
                tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
                      NFC_ACCESS_DIR;
                writel(tmp, nfc->regs + NFC_REG_CMD);

                /* Arbitrary limit for polling mode */
                if (cnt < 64)
                        poll = true;

                ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
                if (ret)
                        break;

                offs += cnt;
        }
}

/* These seed values have been extracted from Allwinner's BSP */
static const u16 sunxi_nfc_randomizer_page_seeds[] = {
        0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
        0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
        0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
        0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
        0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
        0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
        0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
        0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
        0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
        0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
        0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
        0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
        0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
        0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
        0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
        0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
};

/*
 * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
 * have been generated using
 * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
 * the randomizer engine does internally before de/scrambling OOB data.
 *
 * Those tables are statically defined to avoid calculating randomizer state
 * at runtime.
 */
static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
        0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
        0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
        0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
        0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
        0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
        0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
        0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
        0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
        0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
        0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
        0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
        0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
        0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
        0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
        0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
        0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
};

static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
        0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
        0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
        0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
        0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
        0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
        0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
        0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
        0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
        0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
        0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
        0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
        0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
        0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
        0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
        0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
        0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
};
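
/*
 * Advance the randomizer LFSR state by 'count' steps and return the
 * resulting state.
 */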
static u16 sunxi_nfc_randomizer_step(u16 state, int count)
{
        state &= 0x7fff;

        /*
         * This loop is just a simple implementation of a Fibonacci LFSR using
         * the x16 + x15 + 1 polynomial.
         */
        while (count--)
                state = ((state >> 1) |
                         (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;

        return state;
}

static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page,
                                      bool ecc)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
        int mod = mtd_div_by_ws(mtd->erasesize, mtd);

        if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
                mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);

        if (ecc) {
                if (mtd->ecc_step_size == 512)
                        seeds = sunxi_nfc_randomizer_ecc512_seeds;
                else
                        seeds = sunxi_nfc_randomizer_ecc1024_seeds;
        }

        return seeds[page % mod];
}

static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
                                        bool ecc)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
        u16 state;

        if (!(nand->options & NAND_NEED_SCRAMBLING))
                return;

        ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
        state = sunxi_nfc_randomizer_state(nand, page, ecc);
        ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
        writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomizer_enable(struct nand_chip *nand)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

        if (!(nand->options & NAND_NEED_SCRAMBLING))
                return;

        writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
               nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomizer_disable(struct nand_chip *nand)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

        if (!(nand->options & NAND_NEED_SCRAMBLING))
                return;

        writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
               nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm)
{
        u16 state = sunxi_nfc_randomizer_state(nand, page, true);

        bbm[0] ^= state;
        bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
}

static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand,
                                           const uint8_t *buf, int len,
                                           bool ecc, int page)
{
        sunxi_nfc_randomizer_config(nand, page, ecc);
        sunxi_nfc_randomizer_enable(nand);
        sunxi_nfc_write_buf(nand, buf, len);
        sunxi_nfc_randomizer_disable(nand);
}

static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf,
                                          int len, bool ecc, int page)
{
        sunxi_nfc_randomizer_config(nand, page, ecc);
        sunxi_nfc_randomizer_enable(nand);
        sunxi_nfc_read_buf(nand, buf, len);
        sunxi_nfc_randomizer_disable(nand);
}

static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand)
{
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        u32 ecc_ctl;

        ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
        ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
                     NFC_ECC_BLOCK_SIZE_MSK);
        ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(sunxi_nand->ecc->mode) |
                   NFC_ECC_EXCEPTION | NFC_ECC_PIPELINE;

        if (nand->ecc.size == 512)
                ecc_ctl |= NFC_ECC_BLOCK_512;

        writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

        writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
               nfc->regs + NFC_REG_ECC_CTL);
}

static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
{
        buf[0] = user_data;
        buf[1] = user_data >> 8;
        buf[2] = user_data >> 16;
        buf[3] = user_data >> 24;
}

static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
{
        return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
}
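
/*
 * The 4 "free" OOB bytes protected by ECC for each step are exchanged through
 * the USER_DATA registers. The Bad Block Marker shares the first of these
 * slots, so it is de-randomized on read and randomized on write when
 * scrambling is in use.
 */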
static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob,
                                                int step, bool bbm, int page)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

        sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
                                   oob);

        /* De-randomize the Bad Block Marker. */
        if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
                sunxi_nfc_randomize_bbm(nand, page, oob);
}

static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
                                                const u8 *oob, int step,
                                                bool bbm, int page)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        u8 user_data[4];

        /* Randomize the Bad Block Marker. */
        if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
                memcpy(user_data, oob, sizeof(user_data));
                sunxi_nfc_randomize_bbm(nand, page, user_data);
                oob = user_data;
        }

        writel(sunxi_nfc_buf_to_user_data(oob),
               nfc->regs + NFC_REG_USER_DATA(step));
}

static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
                                          unsigned int *max_bitflips, int ret)
{
        struct mtd_info *mtd = nand_to_mtd(nand);

        if (ret < 0) {
                mtd->ecc_stats.failed++;
        } else {
                mtd->ecc_stats.corrected += ret;
                *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
        }
}
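
/*
 * Decode the ECC status for one step: an error bit means an uncorrectable
 * chunk (-EBADMSG), a "pattern found" bit means the engine recognized a
 * blank (0xff) or all-zero chunk, otherwise the number of corrected bitflips
 * is read back from the ECC_ERR_CNT register.
 */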
static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
                                    int step, u32 status, bool *erased)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        u32 tmp;

        *erased = false;

        if (status & NFC_ECC_ERR(step))
                return -EBADMSG;

        if (status & NFC_ECC_PAT_FOUND(step)) {
                u8 pattern;

                if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
                        pattern = 0x0;
                } else {
                        pattern = 0xff;
                        *erased = true;
                }

                if (data)
                        memset(data, pattern, ecc->size);

                if (oob)
                        memset(oob, pattern, ecc->bytes + 4);

                return 0;
        }

        tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));

        return NFC_ECC_ERR_CNT(step, tmp);
}
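
/*
 * Read and correct a single ECC chunk in PIO mode. On an uncorrectable error
 * the chunk is re-read with the randomizer disabled so that bitflips in
 * erased pages can be told apart from real corruption. Returns 1 when the
 * chunk had to be handled in raw mode, 0 otherwise, or a negative error code.
 */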
static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
                                       u8 *data, int data_off,
                                       u8 *oob, int oob_off,
                                       int *cur_off,
                                       unsigned int *max_bitflips,
                                       bool bbm, bool oob_required, int page)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        int raw_mode = 0;
        bool erased;
        int ret;

        if (*cur_off != data_off)
                nand_change_read_column_op(nand, data_off, NULL, 0, false);

        sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page);

        if (data_off + ecc->size != oob_off)
                nand_change_read_column_op(nand, oob_off, NULL, 0, false);

        ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
        if (ret)
                return ret;

        sunxi_nfc_randomizer_enable(nand);
        writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
               nfc->regs + NFC_REG_CMD);

        ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
        sunxi_nfc_randomizer_disable(nand);
        if (ret)
                return ret;

        *cur_off = oob_off + ecc->bytes + 4;

        ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? oob : NULL, 0,
                                       readl(nfc->regs + NFC_REG_ECC_ST),
                                       &erased);
        if (erased)
                return 1;

        if (ret < 0) {
                /*
                 * Re-read the data with the randomizer disabled to identify
                 * bitflips in erased pages.
                 */
                if (nand->options & NAND_NEED_SCRAMBLING)
                        nand_change_read_column_op(nand, data_off, data,
                                                   ecc->size, false);
                else
                        memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
                                      ecc->size);

                nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
                                           false);

                ret = nand_check_erased_ecc_chunk(data, ecc->size,
                                                  oob, ecc->bytes + 4,
                                                  NULL, 0, ecc->strength);
                if (ret >= 0)
                        raw_mode = 1;
        } else {
                memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);

                if (oob_required) {
                        nand_change_read_column_op(nand, oob_off, NULL, 0,
                                                   false);
                        sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4,
                                                      true, page);

                        sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0,
                                                            bbm, page);
                }
        }

        sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret);

        return raw_mode;
}

static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand,
                                            u8 *oob, int *cur_off,
                                            bool randomize, int page)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        int offset = ((ecc->bytes + 4) * ecc->steps);
        int len = mtd->oobsize - offset;

        if (len <= 0)
                return;

        if (!cur_off || *cur_off != offset)
                nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
                                           false);

        if (!randomize)
                sunxi_nfc_read_buf(nand, oob + offset, len);
        else
                sunxi_nfc_randomizer_read_buf(nand, oob + offset, len,
                                              false, page);

        if (cur_off)
                *cur_off = mtd->oobsize + mtd->writesize;
}
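
/*
 * DMA-based page read: all ECC chunks are transferred in one controller
 * operation, then the ECC status is decoded per chunk. Chunks flagged as
 * uncorrectable are re-read in PIO mode (randomizer off) to check whether
 * they are really corrupted or just erased pages with bitflips.
 */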
static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf,
                                            int oob_required, int page,
                                            int nchunks)
{
        bool randomized = nand->options & NAND_NEED_SCRAMBLING;
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        unsigned int max_bitflips = 0;
        int ret, i, raw_mode = 0;
        struct scatterlist sg;
        u32 status, wait;

        ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
        if (ret)
                return ret;

        ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks,
                                       DMA_FROM_DEVICE, &sg);
        if (ret)
                return ret;

        sunxi_nfc_hw_ecc_enable(nand);
        sunxi_nfc_randomizer_config(nand, page, false);
        sunxi_nfc_randomizer_enable(nand);

        writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
               NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);

        wait = NFC_CMD_INT_FLAG;

        if (nfc->caps->has_mdma)
                wait |= NFC_DMA_INT_FLAG;
        else
                dma_async_issue_pending(nfc->dmac);

        writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
               nfc->regs + NFC_REG_CMD);

        ret = sunxi_nfc_wait_events(nfc, wait, false, 0);
        if (ret && !nfc->caps->has_mdma)
                dmaengine_terminate_all(nfc->dmac);

        sunxi_nfc_randomizer_disable(nand);
        sunxi_nfc_hw_ecc_disable(nand);

        sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg);

        if (ret)
                return ret;

        status = readl(nfc->regs + NFC_REG_ECC_ST);

        for (i = 0; i < nchunks; i++) {
                int data_off = i * ecc->size;
                int oob_off = i * (ecc->bytes + 4);
                u8 *data = buf + data_off;
                u8 *oob = nand->oob_poi + oob_off;
                bool erased;

                ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL,
                                               oob_required ? oob : NULL,
                                               i, status, &erased);

                /* ECC errors are handled in the second loop. */
                if (ret < 0)
                        continue;

                if (oob_required && !erased) {
                        /* TODO: use DMA to retrieve OOB */
                        nand_change_read_column_op(nand,
                                                   mtd->writesize + oob_off,
                                                   oob, ecc->bytes + 4, false);

                        sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i,
                                                            !i, page);
                }

                if (erased)
                        raw_mode = 1;

                sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
        }

        if (status & NFC_ECC_ERR_MSK) {
                for (i = 0; i < nchunks; i++) {
                        int data_off = i * ecc->size;
                        int oob_off = i * (ecc->bytes + 4);
                        u8 *data = buf + data_off;
                        u8 *oob = nand->oob_poi + oob_off;

                        if (!(status & NFC_ECC_ERR(i)))
                                continue;

                        /*
                         * Re-read the data with the randomizer disabled to
                         * identify bitflips in erased pages.
                         * TODO: use DMA to read page in raw mode
                         */
                        if (randomized)
                                nand_change_read_column_op(nand, data_off,
                                                           data, ecc->size,
                                                           false);

                        /* TODO: use DMA to retrieve OOB */
                        nand_change_read_column_op(nand,
                                                   mtd->writesize + oob_off,
                                                   oob, ecc->bytes + 4, false);

                        ret = nand_check_erased_ecc_chunk(data, ecc->size,
                                                          oob, ecc->bytes + 4,
                                                          NULL, 0,
                                                          ecc->strength);
                        if (ret >= 0)
                                raw_mode = 1;

                        sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
                }
        }

        if (oob_required)
                sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi,
                                                NULL, !raw_mode,
                                                page);

        return max_bitflips;
}
static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
                                        const u8 *data, int data_off,
                                        const u8 *oob, int oob_off,
                                        int *cur_off, bool bbm,
                                        int page)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        int ret;

        if (data_off != *cur_off)
                nand_change_write_column_op(nand, data_off, NULL, 0, false);

        sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page);

        if (data_off + ecc->size != oob_off)
                nand_change_write_column_op(nand, oob_off, NULL, 0, false);

        ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
        if (ret)
                return ret;

        sunxi_nfc_randomizer_enable(nand);
        sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);

        writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
               NFC_ACCESS_DIR | NFC_ECC_OP,
               nfc->regs + NFC_REG_CMD);

        ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
        sunxi_nfc_randomizer_disable(nand);
        if (ret)
                return ret;

        *cur_off = oob_off + ecc->bytes + 4;

        return 0;
}

static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand,
                                             u8 *oob, int *cur_off,
                                             int page)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        int offset = ((ecc->bytes + 4) * ecc->steps);
        int len = mtd->oobsize - offset;

        if (len <= 0)
                return;

        if (!cur_off || *cur_off != offset)
                nand_change_write_column_op(nand, offset + mtd->writesize,
                                            NULL, 0, false);

        sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page);

        if (cur_off)
                *cur_off = mtd->oobsize + mtd->writesize;
}

static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf,
                                      int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        unsigned int max_bitflips = 0;
        int ret, i, cur_off = 0;
        bool raw_mode = false;

        sunxi_nfc_select_chip(nand, nand->cur_cs);

        nand_read_page_op(nand, page, 0, NULL, 0);

        sunxi_nfc_hw_ecc_enable(nand);

        for (i = 0; i < ecc->steps; i++) {
                int data_off = i * ecc->size;
                int oob_off = i * (ecc->bytes + 4);
                u8 *data = buf + data_off;
                u8 *oob = nand->oob_poi + oob_off;

                ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob,
                                                  oob_off + mtd->writesize,
                                                  &cur_off, &max_bitflips,
                                                  !i, oob_required, page);
                if (ret < 0)
                        return ret;
                else if (ret)
                        raw_mode = true;
        }

        if (oob_required)
                sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off,
                                                !raw_mode, page);

        sunxi_nfc_hw_ecc_disable(nand);

        return max_bitflips;
}
static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf,
                                          int oob_required, int page)
{
        int ret;

        sunxi_nfc_select_chip(nand, nand->cur_cs);

        nand_read_page_op(nand, page, 0, NULL, 0);

        ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page,
                                               nand->ecc.steps);
        if (ret >= 0)
                return ret;

        /* Fallback to PIO mode */
        return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page);
}

static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand,
                                         u32 data_offs, u32 readlen,
                                         u8 *bufpoi, int page)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        int ret, i, cur_off = 0;
        unsigned int max_bitflips = 0;

        sunxi_nfc_select_chip(nand, nand->cur_cs);

        nand_read_page_op(nand, page, 0, NULL, 0);

        sunxi_nfc_hw_ecc_enable(nand);

        for (i = data_offs / ecc->size;
             i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
                int data_off = i * ecc->size;
                int oob_off = i * (ecc->bytes + 4);
                u8 *data = bufpoi + data_off;
                u8 *oob = nand->oob_poi + oob_off;

                ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off,
                                                  oob,
                                                  oob_off + mtd->writesize,
                                                  &cur_off, &max_bitflips, !i,
                                                  false, page);
                if (ret < 0)
                        return ret;
        }

        sunxi_nfc_hw_ecc_disable(nand);

        return max_bitflips;
}

static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand,
                                             u32 data_offs, u32 readlen,
                                             u8 *buf, int page)
{
        int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size);
        int ret;

        sunxi_nfc_select_chip(nand, nand->cur_cs);

        nand_read_page_op(nand, page, 0, NULL, 0);

        ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks);
        if (ret >= 0)
                return ret;

        /* Fallback to PIO mode */
        return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen,
                                             buf, page);
}

static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand,
                                       const uint8_t *buf, int oob_required,
                                       int page)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        int ret, i, cur_off = 0;

        sunxi_nfc_select_chip(nand, nand->cur_cs);

        nand_prog_page_begin_op(nand, page, 0, NULL, 0);

        sunxi_nfc_hw_ecc_enable(nand);

        for (i = 0; i < ecc->steps; i++) {
                int data_off = i * ecc->size;
                int oob_off = i * (ecc->bytes + 4);
                const u8 *data = buf + data_off;
                const u8 *oob = nand->oob_poi + oob_off;

                ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
                                                   oob_off + mtd->writesize,
                                                   &cur_off, !i, page);
                if (ret)
                        return ret;
        }

        if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
                sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
                                                 &cur_off, page);

        sunxi_nfc_hw_ecc_disable(nand);

        return nand_prog_page_end_op(nand);
}

static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand,
                                          u32 data_offs, u32 data_len,
                                          const u8 *buf, int oob_required,
                                          int page)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        int ret, i, cur_off = 0;

        sunxi_nfc_select_chip(nand, nand->cur_cs);

        nand_prog_page_begin_op(nand, page, 0, NULL, 0);

        sunxi_nfc_hw_ecc_enable(nand);

        for (i = data_offs / ecc->size;
             i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
                int data_off = i * ecc->size;
                int oob_off = i * (ecc->bytes + 4);
                const u8 *data = buf + data_off;
                const u8 *oob = nand->oob_poi + oob_off;

                ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
                                                   oob_off + mtd->writesize,
                                                   &cur_off, !i, page);
                if (ret)
                        return ret;
        }

        sunxi_nfc_hw_ecc_disable(nand);

        return nand_prog_page_end_op(nand);
}
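
/*
 * DMA-based page write: the protected OOB bytes of every step are loaded
 * into the USER_DATA registers up front, then the whole page is programmed
 * in a single controller operation. Falls back to the PIO path if the DMA
 * transfer cannot be prepared.
 */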
static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
                                           const u8 *buf,
                                           int oob_required,
                                           int page)
{
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
        struct scatterlist sg;
        u32 wait;
        int ret, i;

        sunxi_nfc_select_chip(nand, nand->cur_cs);

        ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
        if (ret)
                return ret;

        ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps,
                                       DMA_TO_DEVICE, &sg);
        if (ret)
                goto pio_fallback;

        for (i = 0; i < ecc->steps; i++) {
                const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));

                sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
        }

        nand_prog_page_begin_op(nand, page, 0, NULL, 0);

        sunxi_nfc_hw_ecc_enable(nand);
        sunxi_nfc_randomizer_config(nand, page, false);
        sunxi_nfc_randomizer_enable(nand);

        writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
               nfc->regs + NFC_REG_WCMD_SET);

        wait = NFC_CMD_INT_FLAG;

        if (nfc->caps->has_mdma)
                wait |= NFC_DMA_INT_FLAG;
        else
                dma_async_issue_pending(nfc->dmac);

        writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
               NFC_DATA_TRANS | NFC_ACCESS_DIR,
               nfc->regs + NFC_REG_CMD);

        ret = sunxi_nfc_wait_events(nfc, wait, false, 0);
        if (ret && !nfc->caps->has_mdma)
                dmaengine_terminate_all(nfc->dmac);

        sunxi_nfc_randomizer_disable(nand);
        sunxi_nfc_hw_ecc_disable(nand);

        sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg);

        if (ret)
                return ret;

        if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
                /* TODO: use DMA to transfer extra OOB bytes ? */
                sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
                                                 NULL, page);

        return nand_prog_page_end_op(nand);

pio_fallback:
        return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page);
}

static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
{
        u8 *buf = nand_get_data_buf(nand);

        return nand->ecc.read_page(nand, buf, 1, page);
}

static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        u8 *buf = nand_get_data_buf(nand);
        int ret;

        memset(buf, 0xff, mtd->writesize);
        ret = nand->ecc.write_page(nand, buf, 1, page);
        if (ret)
                return ret;

        /* Send command to program the OOB data */
        return nand_prog_page_end_op(nand);
}
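
/*
 * tWB and tRHW are encoded as 2-bit fields in TIMING_CFG; these lookup tables
 * give the maximum number of clock cycles each encoded value corresponds to.
 */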
static const s32 tWB_lut[] = {6, 12, 16, 20};
static const s32 tRHW_lut[] = {4, 8, 12, 20};

static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
                                     u32 clk_period)
{
        u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
        int i;

        for (i = 0; i < lut_size; i++) {
                if (clk_cycles <= lut[i])
                        return i;
        }

        /* Doesn't fit */
        return -EINVAL;
}

#define sunxi_nand_lookup_timing(l, p, c) \
        _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
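
/*
 * Derive the smallest clock period (in ps) that satisfies every SDR timing
 * constraint of the chip, encode tWB/tADL/tWHR/tRHW/tCAD into TIMING_CFG,
 * compute the resulting mod clock rate, and enable EDO mode when the read
 * cycle gets shorter than 30 ns or tREA exceeds tRP.
 */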
  1135. static int sunxi_nfc_setup_interface(struct nand_chip *nand, int csline,
  1136. const struct nand_interface_config *conf)
  1137. {
  1138. struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
  1139. struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
  1140. const struct nand_sdr_timings *timings;
  1141. u32 min_clk_period = 0;
  1142. s32 tWB, tADL, tWHR, tRHW, tCAD;
  1143. long real_clk_rate;
  1144. timings = nand_get_sdr_timings(conf);
  1145. if (IS_ERR(timings))
  1146. return -ENOTSUPP;
  1147. /* T1 <=> tCLS */
  1148. if (timings->tCLS_min > min_clk_period)
  1149. min_clk_period = timings->tCLS_min;
  1150. /* T2 <=> tCLH */
  1151. if (timings->tCLH_min > min_clk_period)
  1152. min_clk_period = timings->tCLH_min;
  1153. /* T3 <=> tCS */
  1154. if (timings->tCS_min > min_clk_period)
  1155. min_clk_period = timings->tCS_min;
  1156. /* T4 <=> tCH */
  1157. if (timings->tCH_min > min_clk_period)
  1158. min_clk_period = timings->tCH_min;
  1159. /* T5 <=> tWP */
  1160. if (timings->tWP_min > min_clk_period)
  1161. min_clk_period = timings->tWP_min;
  1162. /* T6 <=> tWH */
  1163. if (timings->tWH_min > min_clk_period)
  1164. min_clk_period = timings->tWH_min;
  1165. /* T7 <=> tALS */
  1166. if (timings->tALS_min > min_clk_period)
  1167. min_clk_period = timings->tALS_min;
  1168. /* T8 <=> tDS */
  1169. if (timings->tDS_min > min_clk_period)
  1170. min_clk_period = timings->tDS_min;
  1171. /* T9 <=> tDH */
  1172. if (timings->tDH_min > min_clk_period)
  1173. min_clk_period = timings->tDH_min;
  1174. /* T10 <=> tRR */
  1175. if (timings->tRR_min > (min_clk_period * 3))
  1176. min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
  1177. /* T11 <=> tALH */
  1178. if (timings->tALH_min > min_clk_period)
  1179. min_clk_period = timings->tALH_min;
  1180. /* T12 <=> tRP */
  1181. if (timings->tRP_min > min_clk_period)
  1182. min_clk_period = timings->tRP_min;
  1183. /* T13 <=> tREH */
  1184. if (timings->tREH_min > min_clk_period)
  1185. min_clk_period = timings->tREH_min;
  1186. /* T14 <=> tRC */
  1187. if (timings->tRC_min > (min_clk_period * 2))
  1188. min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
  1189. /* T15 <=> tWC */
  1190. if (timings->tWC_min > (min_clk_period * 2))
  1191. min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
  1192. /* T16 - T19 + tCAD */
  1193. if (timings->tWB_max > (min_clk_period * 20))
  1194. min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
  1195. if (timings->tADL_min > (min_clk_period * 32))
  1196. min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
  1197. if (timings->tWHR_min > (min_clk_period * 32))
  1198. min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
  1199. if (timings->tRHW_min > (min_clk_period * 20))
  1200. min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
  1201. /*
  1202. * In non-EDO, tREA should be less than tRP to guarantee that the
  1203. * controller does not sample the IO lines too early. Unfortunately,
  1204. * the sunxi NAND controller does not allow us to have different
  1205. * values for tRP and tREH (tRP = tREH = tRW / 2).
  1206. *
  1207. * We have 2 options to overcome this limitation:
  1208. *
  1209. * 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint
  1210. * 2/ Use EDO mode (only works if timings->tRLOH > 0)
  1211. */
  1212. if (timings->tREA_max > min_clk_period && !timings->tRLOH_min)
  1213. min_clk_period = timings->tREA_max;
  1214. tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
  1215. min_clk_period);
  1216. if (tWB < 0) {
  1217. dev_err(nfc->dev, "unsupported tWB\n");
  1218. return tWB;
  1219. }
  1220. tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
  1221. if (tADL > 3) {
  1222. dev_err(nfc->dev, "unsupported tADL\n");
  1223. return -EINVAL;
  1224. }
  1225. tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
  1226. if (tWHR > 3) {
  1227. dev_err(nfc->dev, "unsupported tWHR\n");
  1228. return -EINVAL;
  1229. }
  1230. tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
  1231. min_clk_period);
  1232. if (tRHW < 0) {
  1233. dev_err(nfc->dev, "unsupported tRHW\n");
  1234. return tRHW;
  1235. }
  1236. if (csline == NAND_DATA_IFACE_CHECK_ONLY)
  1237. return 0;
  1238. /*
  1239. * TODO: according to ONFI specs this value only applies for DDR NAND,
  1240. * but Allwinner seems to set this to 0x7. Mimic them for now.
  1241. */
  1242. tCAD = 0x7;
  1243. /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
  1244. sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
  1245. /* Convert min_clk_period from picoseconds to nanoseconds */
  1246. min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
	/*
	 * Unlike what is stated in the Allwinner datasheet, the clk_rate
	 * should be set to (1 / min_clk_period), and not (2 / min_clk_period).
	 * This new formula was verified with a scope and validated by
	 * Allwinner engineers.
	 */
	sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period;
	real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate);
	if (real_clk_rate <= 0) {
		dev_err(nfc->dev, "Unable to round clk %lu\n",
			sunxi_nand->clk_rate);
		return -EINVAL;
	}

	sunxi_nand->timing_ctl = 0;

	/*
	 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
	 * output cycle timings shall be used if the host drives tRC less than
	 * 30 ns. We should also use EDO mode if tREA is bigger than tRP.
	 */
	min_clk_period = NSEC_PER_SEC / real_clk_rate;
	if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max)
		sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO;

	return 0;
}
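
/*
 * OOB layout exposed to MTD: each ECC step owns (ecc->bytes + 4) bytes of
 * OOB, laid out as 4 free/user bytes followed by ecc->bytes bytes of ECC
 * data.
 */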
static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &nand->ecc;

	if (section >= ecc->steps)
		return -ERANGE;

	oobregion->offset = section * (ecc->bytes + 4) + 4;
	oobregion->length = ecc->bytes;

	return 0;
}
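
/*
 * Free OOB bytes are the 4-byte prefix of each step (minus the 2-byte bad
 * block marker in the first step) plus anything left past the last ECC
 * region. For example (hypothetical geometry: 64-byte OOB, 2 steps,
 * 28 ECC bytes per step): free regions are bytes [2..3] and [32..35],
 * ECC regions are bytes [4..31] and [36..63].
 */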
static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
				     struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &nand->ecc;

	if (section > ecc->steps)
		return -ERANGE;

	/*
	 * The first 2 bytes are used for BB markers, hence we
	 * only have 2 bytes available in the first user data
	 * section.
	 */
	if (!section && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
		oobregion->offset = 2;
		oobregion->length = 2;

		return 0;
	}

	oobregion->offset = section * (ecc->bytes + 4);

	if (section < ecc->steps)
		oobregion->length = 4;
	else
		oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
	.ecc = sunxi_nand_ooblayout_ecc,
	.free = sunxi_nand_ooblayout_free,
};

static void sunxi_nand_hw_ecc_ctrl_cleanup(struct sunxi_nand_chip *sunxi_nand)
{
	kfree(sunxi_nand->ecc);
}
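
/*
 * Configure the hardware BCH engine: pick an ECC strength from the list the
 * controller supports, derive the number of ECC bytes per step, check that
 * everything fits in the OOB area, and install the page/OOB accessors (DMA
 * variants when a DMA channel or the internal MDMA is available).
 *
 * Worked example (hypothetical geometry, NAND_ECC_MAXIMIZE_STRENGTH set,
 * 4 KiB page, 256-byte OOB): 4 steps of 1024 bytes, (256 - 2) / 4 = 63
 * bytes per step (integer division), minus the 4 prefix bytes = 59, rounded
 * down to 58 (even), i.e. a raw strength of 58 * 8 / fls(8192) = 33 bits,
 * clamped to the supported 32 bits per step; the engine then needs
 * DIV_ROUND_UP(32 * 14, 8) = 56 ECC bytes per step, and (56 + 4) * 4 = 240
 * <= 256 fits.
 */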
static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
				       struct nand_ecc_ctrl *ecc,
				       struct device_node *np)
{
	static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	int nsectors;
	int ret;
	int i;

	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
		int bytes;

		ecc->size = 1024;
		nsectors = mtd->writesize / ecc->size;

		/* Reserve 2 bytes for the BBM */
		bytes = (mtd->oobsize - 2) / nsectors;

		/* 4 non-ECC bytes are added before each ECC bytes section */
		bytes -= 4;

		/* and bytes has to be even. */
		if (bytes % 2)
			bytes--;

		ecc->strength = bytes * 8 / fls(8 * ecc->size);

		for (i = 0; i < ARRAY_SIZE(strengths); i++) {
			if (strengths[i] > ecc->strength)
				break;
		}

		if (!i)
			ecc->strength = 0;
		else
			ecc->strength = strengths[i - 1];
	}

	if (ecc->size != 512 && ecc->size != 1024)
		return -EINVAL;

	sunxi_nand->ecc = kzalloc(sizeof(*sunxi_nand->ecc), GFP_KERNEL);
	if (!sunxi_nand->ecc)
		return -ENOMEM;

	/* Prefer 1k ECC chunks over 512-byte ones */
	if (ecc->size == 512 && mtd->writesize > 512) {
		ecc->size = 1024;
		ecc->strength *= 2;
	}

	/* Round the strength up to the closest supported value */
	for (i = 0; i < ARRAY_SIZE(strengths); i++) {
		if (ecc->strength <= strengths[i]) {
			/*
			 * Update ecc->strength value with the actual strength
			 * that will be used by the ECC engine.
			 */
			ecc->strength = strengths[i];
			break;
		}
	}

	if (i >= ARRAY_SIZE(strengths)) {
		dev_err(nfc->dev, "unsupported strength\n");
		ret = -ENOTSUPP;
		goto err;
	}

	sunxi_nand->ecc->mode = i;

	/* The HW ECC engine always requests ECC bytes for 1024-byte blocks */
	ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);

	/* The HW ECC engine always works with an even number of ECC bytes */
	ecc->bytes = ALIGN(ecc->bytes, 2);

	nsectors = mtd->writesize / ecc->size;

	if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
		ret = -EINVAL;
		goto err;
	}

	ecc->read_oob = sunxi_nfc_hw_ecc_read_oob;
	ecc->write_oob = sunxi_nfc_hw_ecc_write_oob;
	mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);

	if (nfc->dmac || nfc->caps->has_mdma) {
		ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
		ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
		nand->options |= NAND_USES_DMA;
	} else {
		ecc->read_page = sunxi_nfc_hw_ecc_read_page;
		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
		ecc->write_page = sunxi_nfc_hw_ecc_write_page;
	}

	/* TODO: support DMA for raw accesses and subpage write */
	ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
	ecc->read_oob_raw = nand_read_oob_std;
	ecc->write_oob_raw = nand_write_oob_std;

	return 0;

err:
	kfree(sunxi_nand->ecc);

	return ret;
}

static void sunxi_nand_ecc_cleanup(struct sunxi_nand_chip *sunxi_nand)
{
	struct nand_ecc_ctrl *ecc = &sunxi_nand->nand.ecc;

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		sunxi_nand_hw_ecc_ctrl_cleanup(sunxi_nand);
		break;
	case NAND_ECC_ENGINE_TYPE_NONE:
	default:
		break;
	}
}
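
/*
 * ->attach_chip() hook, called once the NAND chip has been identified:
 * adjust chip options (on-flash BBT without OOB, no subpage write when
 * scrambling is needed, subpage reads allowed), fill in the default ECC
 * requirements reported by the chip, and set up the host ECC engine when
 * requested.
 */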
static int sunxi_nand_attach_chip(struct nand_chip *nand)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&nand->base);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	struct device_node *np = nand_get_flash_node(nand);
	int ret;

	if (nand->bbt_options & NAND_BBT_USE_FLASH)
		nand->bbt_options |= NAND_BBT_NO_OOB;

	if (nand->options & NAND_NEED_SCRAMBLING)
		nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->options |= NAND_SUBPAGE_READ;

	if (!ecc->size) {
		ecc->size = requirements->step_size;
		ecc->strength = requirements->strength;
	}

	if (!ecc->size || !ecc->strength)
		return -EINVAL;

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
		if (ret)
			return ret;
		break;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
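
/*
 * Translate one parsed sub-operation into a single NFC command: opcodes go
 * to CMD1/CMD2, up to 8 address cycles are packed into ADDR_LOW/ADDR_HIGH,
 * and data is staged in the controller SRAM (NFC_RAM0_BASE), which limits
 * transfers to NFC_SRAM_SIZE bytes per sub-operation.
 */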
static int sunxi_nfc_exec_subop(struct nand_chip *nand,
				const struct nand_subop *subop)
{
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { };
	unsigned int i, j, remaining, start;
	void *inbuf = NULL;
	int ret;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (cmd & NFC_SEND_CMD1) {
				if (WARN_ON(cmd & NFC_SEND_CMD2))
					return -EINVAL;

				cmd |= NFC_SEND_CMD2;
				extcmd |= instr->ctx.cmd.opcode;
			} else {
				cmd |= NFC_SEND_CMD1 |
				       NFC_CMD(instr->ctx.cmd.opcode);
			}
			break;

		case NAND_OP_ADDR_INSTR:
			remaining = nand_subop_get_num_addr_cyc(subop, i);
			start = nand_subop_get_addr_start_off(subop, i);
			for (j = 0; j < 8 && j + start < remaining; j++) {
				u32 addr = instr->ctx.addr.addrs[j + start];

				addrs[j / 4] |= addr << (j % 4) * 8;
			}

			if (j)
				cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j);

			break;

		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			start = nand_subop_get_data_start_off(subop, i);
			remaining = nand_subop_get_data_len(subop, i);
			cnt = min_t(u32, remaining, NFC_SRAM_SIZE);
			cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;

			if (instr->type == NAND_OP_DATA_OUT_INSTR) {
				cmd |= NFC_ACCESS_DIR;
				memcpy_toio(nfc->regs + NFC_RAM0_BASE,
					    instr->ctx.data.buf.out + start,
					    cnt);
			} else {
				inbuf = instr->ctx.data.buf.in + start;
			}

			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= NFC_WAIT_FLAG;
			break;
		}
	}

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	if (cmd & NFC_SEND_ADR) {
		writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW);
		writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH);
	}

	if (cmd & NFC_SEND_CMD2)
		writel(extcmd,
		       nfc->regs +
		       (cmd & NFC_ACCESS_DIR ?
			NFC_REG_WCMD_SET : NFC_REG_RCMD_SET));

	if (cmd & NFC_DATA_TRANS)
		writel(cnt, nfc->regs + NFC_REG_CNT);

	writel(cmd, nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG,
				    !(cmd & NFC_WAIT_FLAG) && cnt < 64,
				    0);
	if (ret)
		return ret;

	if (inbuf)
		memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt);

	return 0;
}
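
/*
 * Fallback used when no native R/B line is wired for the selected chip:
 * emulate the wait by polling the chip status through nand_soft_waitrdy().
 */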
static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand,
				  const struct nand_subop *subop)
{
	return nand_soft_waitrdy(nand,
				 subop->instrs[0].ctx.waitrdy.timeout_ms);
}
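
/*
 * Operation parser patterns: at most 8 address cycles and 1024 data bytes
 * (the size of the controller SRAM) per sub-operation. The first parser is
 * used when a native R/B line is available; the second one routes WAITRDY
 * instructions to the soft polling helper above instead.
 */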
static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
	NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
);

static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
	NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true)),
	NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy,
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
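
/*
 * ->exec_op() entry point: select the addressed CS (unless this is only a
 * capability check) and pick the parser matching whether that CS has a
 * native R/B line.
 */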
static int sunxi_nfc_exec_op(struct nand_chip *nand,
			     const struct nand_operation *op, bool check_only)
{
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	const struct nand_op_parser *parser;

	if (!check_only)
		sunxi_nfc_select_chip(nand, op->cs);

	if (sunxi_nand->sels[op->cs].rb >= 0)
		parser = &sunxi_nfc_op_parser;
	else
		parser = &sunxi_nfc_norb_op_parser;

	return nand_op_parser_exec_op(nand, parser, op, check_only);
}

static const struct nand_controller_ops sunxi_nand_controller_ops = {
	.attach_chip = sunxi_nand_attach_chip,
	.setup_interface = sunxi_nfc_setup_interface,
	.exec_op = sunxi_nfc_exec_op,
};
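
/*
 * Instantiate one NAND chip described by a child node of the controller.
 * Each entry of the "reg" property is a chip select; "allwinner,rb" lists
 * the native ready/busy lines (if any) associated with those chip selects.
 *
 * Minimal, illustrative child node (binding details omitted):
 *
 *	nand@0 {
 *		reg = <0>;		// CS 0
 *		allwinner,rb = <0>;	// native R/B line 0
 *	};
 */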
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
				struct device_node *np)
{
	struct sunxi_nand_chip *sunxi_nand;
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int nsels;
	int ret;
	int i;
	u32 tmp;

	if (!of_get_property(np, "reg", &nsels))
		return -EINVAL;

	nsels /= sizeof(u32);
	if (!nsels) {
		dev_err(dev, "invalid reg property size\n");
		return -EINVAL;
	}

	sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels),
				  GFP_KERNEL);
	if (!sunxi_nand)
		return -ENOMEM;

	sunxi_nand->nsels = nsels;

	for (i = 0; i < nsels; i++) {
		ret = of_property_read_u32_index(np, "reg", i, &tmp);
		if (ret) {
			dev_err(dev, "could not retrieve reg property: %d\n",
				ret);
			return ret;
		}

		if (tmp > NFC_MAX_CS) {
			dev_err(dev,
				"invalid reg value: %u (max CS = 7)\n",
				tmp);
			return -EINVAL;
		}

		if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
			dev_err(dev, "CS %d already assigned\n", tmp);
			return -EINVAL;
		}

		sunxi_nand->sels[i].cs = tmp;

		if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
		    tmp < 2)
			sunxi_nand->sels[i].rb = tmp;
		else
			sunxi_nand->sels[i].rb = -1;
	}

	nand = &sunxi_nand->nand;
	nand->controller = &nfc->controller;
	nand->controller->ops = &sunxi_nand_controller_ops;

	/*
	 * Set the ECC mode to the default value in case nothing is specified
	 * in the DT.
	 */
	nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	nand_set_flash_node(nand, np);

	mtd = nand_to_mtd(nand);
	mtd->dev.parent = dev;

	ret = nand_scan(nand, nsels);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "failed to register mtd device: %d\n", ret);
		nand_cleanup(nand);
		return ret;
	}

	list_add_tail(&sunxi_nand->node, &nfc->chips);

	return 0;
}

static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
	struct device_node *np = dev->of_node;
	struct device_node *nand_np;
	int nchips = of_get_child_count(np);
	int ret;

	if (nchips > 8) {
		dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = sunxi_nand_chip_init(dev, nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			return ret;
		}
	}

	return 0;
}
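
/*
 * Tear chips down in the reverse order of sunxi_nand_chip_init(): unregister
 * the MTD device, release the NAND core resources, free the per-chip ECC
 * state and unlink the chip from the controller list.
 */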
static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
{
	struct sunxi_nand_chip *sunxi_nand;
	struct nand_chip *chip;
	int ret;

	while (!list_empty(&nfc->chips)) {
		sunxi_nand = list_first_entry(&nfc->chips,
					      struct sunxi_nand_chip,
					      node);
		chip = &sunxi_nand->nand;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		sunxi_nand_ecc_cleanup(sunxi_nand);
		list_del(&sunxi_nand->node);
	}
}
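
/*
 * DMA setup: controllers with an internal MDMA engine need no external
 * channel. Otherwise request the "rxtx" dmaengine channel and configure it
 * for 4-byte accesses to the NFC IO data register; if the request fails for
 * any reason other than probe deferral, warn and fall back to PIO.
 */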
static int sunxi_nfc_dma_init(struct sunxi_nfc *nfc, struct resource *r)
{
	int ret;

	if (nfc->caps->has_mdma)
		return 0;

	nfc->dmac = dma_request_chan(nfc->dev, "rxtx");
	if (IS_ERR(nfc->dmac)) {
		ret = PTR_ERR(nfc->dmac);
		if (ret == -EPROBE_DEFER)
			return ret;

		/* Ignore errors to fall back to PIO mode */
		dev_warn(nfc->dev, "failed to request rxtx DMA channel: %d\n", ret);
		nfc->dmac = NULL;
	} else {
		struct dma_slave_config dmac_cfg = { };

		dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
		dmac_cfg.dst_addr = dmac_cfg.src_addr;
		dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
		dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
		dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
		dmaengine_slave_config(nfc->dmac, &dmac_cfg);
	}

	return 0;
}
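
/*
 * Probe: map the registers, grab the interrupt, enable the "ahb" and "mod"
 * clocks, deassert the optional reset line, reset the controller, install
 * the interrupt handler, set up DMA and finally register the NAND chips
 * described in the device tree. Errors unwind in the reverse order.
 */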
static int sunxi_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	struct sunxi_nfc *nfc;
	int irq;
	int ret;

	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = dev;
	nand_controller_init(&nfc->controller);
	INIT_LIST_HEAD(&nfc->chips);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->regs = devm_ioremap_resource(dev, r);
	if (IS_ERR(nfc->regs))
		return PTR_ERR(nfc->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	nfc->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(nfc->ahb_clk)) {
		dev_err(dev, "failed to retrieve ahb clk\n");
		return PTR_ERR(nfc->ahb_clk);
	}

	ret = clk_prepare_enable(nfc->ahb_clk);
	if (ret)
		return ret;

	nfc->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(nfc->mod_clk)) {
		dev_err(dev, "failed to retrieve mod clk\n");
		ret = PTR_ERR(nfc->mod_clk);
		goto out_ahb_clk_unprepare;
	}

	ret = clk_prepare_enable(nfc->mod_clk);
	if (ret)
		goto out_ahb_clk_unprepare;

	nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
	if (IS_ERR(nfc->reset)) {
		ret = PTR_ERR(nfc->reset);
		goto out_mod_clk_unprepare;
	}

	ret = reset_control_deassert(nfc->reset);
	if (ret) {
		dev_err(dev, "reset err %d\n", ret);
		goto out_mod_clk_unprepare;
	}

	nfc->caps = of_device_get_match_data(&pdev->dev);
	if (!nfc->caps) {
		ret = -EINVAL;
		goto out_ahb_reset_reassert;
	}

	ret = sunxi_nfc_rst(nfc);
	if (ret)
		goto out_ahb_reset_reassert;

	writel(0, nfc->regs + NFC_REG_INT);
	ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
			       0, "sunxi-nand", nfc);
	if (ret)
		goto out_ahb_reset_reassert;

	ret = sunxi_nfc_dma_init(nfc, r);
	if (ret)
		goto out_ahb_reset_reassert;

	platform_set_drvdata(pdev, nfc);

	ret = sunxi_nand_chips_init(dev, nfc);
	if (ret) {
		dev_err(dev, "failed to init nand chips\n");
		goto out_release_dmac;
	}

	return 0;

out_release_dmac:
	if (nfc->dmac)
		dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
	reset_control_assert(nfc->reset);
out_mod_clk_unprepare:
	clk_disable_unprepare(nfc->mod_clk);
out_ahb_clk_unprepare:
	clk_disable_unprepare(nfc->ahb_clk);

	return ret;
}
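
/*
 * Remove: unregister all chips, then release the reset line, the DMA
 * channel and the clocks acquired in probe.
 */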
static int sunxi_nfc_remove(struct platform_device *pdev)
{
	struct sunxi_nfc *nfc = platform_get_drvdata(pdev);

	sunxi_nand_chips_cleanup(nfc);
	reset_control_assert(nfc->reset);

	if (nfc->dmac)
		dma_release_channel(nfc->dmac);
	clk_disable_unprepare(nfc->mod_clk);
	clk_disable_unprepare(nfc->ahb_clk);

	return 0;
}
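
/*
 * Per-SoC capabilities: A10-class controllers rely on an external "rxtx"
 * DMA channel, while A23-class controllers embed their own MDMA engine and
 * expose the IO data register at a different offset.
 */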
static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
	.reg_io_data = NFC_REG_A10_IO_DATA,
	.dma_maxburst = 4,
};

static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
	.has_mdma = true,
	.reg_io_data = NFC_REG_A23_IO_DATA,
	.dma_maxburst = 8,
};

static const struct of_device_id sunxi_nfc_ids[] = {
	{
		.compatible = "allwinner,sun4i-a10-nand",
		.data = &sunxi_nfc_a10_caps,
	},
	{
		.compatible = "allwinner,sun8i-a23-nand-controller",
		.data = &sunxi_nfc_a23_caps,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);

static struct platform_driver sunxi_nfc_driver = {
	.driver = {
		.name = "sunxi_nand",
		.of_match_table = sunxi_nfc_ids,
	},
	.probe = sunxi_nfc_probe,
	.remove = sunxi_nfc_remove,
};
module_platform_driver(sunxi_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
MODULE_ALIAS("platform:sunxi_nand");