// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Agner <[email protected]>
 * Copyright (C) 2014-2015 Lucas Stach <[email protected]>
 * Copyright (C) 2012 Avionic Design GmbH
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/common.h>

#define COMMAND					0x00
#define   COMMAND_GO				BIT(31)
#define   COMMAND_CLE				BIT(30)
#define   COMMAND_ALE				BIT(29)
#define   COMMAND_PIO				BIT(28)
#define   COMMAND_TX				BIT(27)
#define   COMMAND_RX				BIT(26)
#define   COMMAND_SEC_CMD			BIT(25)
#define   COMMAND_AFT_DAT			BIT(24)
#define   COMMAND_TRANS_SIZE(size)		((((size) - 1) & 0xf) << 20)
#define   COMMAND_A_VALID			BIT(19)
#define   COMMAND_B_VALID			BIT(18)
#define   COMMAND_RD_STATUS_CHK			BIT(17)
#define   COMMAND_RBSY_CHK			BIT(16)
#define   COMMAND_CE(x)				BIT(8 + ((x) & 0x7))
#define   COMMAND_CLE_SIZE(size)		((((size) - 1) & 0x3) << 4)
#define   COMMAND_ALE_SIZE(size)		((((size) - 1) & 0xf) << 0)

#define STATUS					0x04

#define ISR					0x08
#define   ISR_CORRFAIL_ERR			BIT(24)
#define   ISR_UND				BIT(7)
#define   ISR_OVR				BIT(6)
#define   ISR_CMD_DONE				BIT(5)
#define   ISR_ECC_ERR				BIT(4)

#define IER					0x0c
#define   IER_ERR_TRIG_VAL(x)			(((x) & 0xf) << 16)
#define   IER_UND				BIT(7)
#define   IER_OVR				BIT(6)
#define   IER_CMD_DONE				BIT(5)
#define   IER_ECC_ERR				BIT(4)
#define   IER_GIE				BIT(0)

#define CONFIG					0x10
#define   CONFIG_HW_ECC				BIT(31)
#define   CONFIG_ECC_SEL			BIT(30)
#define   CONFIG_ERR_COR			BIT(29)
#define   CONFIG_PIPE_EN			BIT(28)
#define   CONFIG_TVAL_4				(0 << 24)
#define   CONFIG_TVAL_6				(1 << 24)
#define   CONFIG_TVAL_8				(2 << 24)
#define   CONFIG_SKIP_SPARE			BIT(23)
#define   CONFIG_BUS_WIDTH_16			BIT(21)
#define   CONFIG_COM_BSY			BIT(20)
#define   CONFIG_PS_256				(0 << 16)
#define   CONFIG_PS_512				(1 << 16)
#define   CONFIG_PS_1024			(2 << 16)
#define   CONFIG_PS_2048			(3 << 16)
#define   CONFIG_PS_4096			(4 << 16)
#define   CONFIG_SKIP_SPARE_SIZE_4		(0 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_8		(1 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_12		(2 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_16		(3 << 14)
#define   CONFIG_TAG_BYTE_SIZE(x)		((x) & 0xff)

#define TIMING_1				0x14
#define   TIMING_TRP_RESP(x)			(((x) & 0xf) << 28)
#define   TIMING_TWB(x)				(((x) & 0xf) << 24)
#define   TIMING_TCR_TAR_TRR(x)			(((x) & 0xf) << 20)
#define   TIMING_TWHR(x)			(((x) & 0xf) << 16)
#define   TIMING_TCS(x)				(((x) & 0x3) << 14)
#define   TIMING_TWH(x)				(((x) & 0x3) << 12)
#define   TIMING_TWP(x)				(((x) & 0xf) << 8)
#define   TIMING_TRH(x)				(((x) & 0x3) << 4)
#define   TIMING_TRP(x)				(((x) & 0xf) << 0)

#define RESP					0x18

#define TIMING_2				0x1c
#define   TIMING_TADL(x)			((x) & 0xf)

#define CMD_REG1				0x20
#define CMD_REG2				0x24
#define ADDR_REG1				0x28
#define ADDR_REG2				0x2c

#define DMA_MST_CTRL				0x30
#define   DMA_MST_CTRL_GO			BIT(31)
#define   DMA_MST_CTRL_IN			(0 << 30)
#define   DMA_MST_CTRL_OUT			BIT(30)
#define   DMA_MST_CTRL_PERF_EN			BIT(29)
#define   DMA_MST_CTRL_IE_DONE			BIT(28)
#define   DMA_MST_CTRL_REUSE			BIT(27)
#define   DMA_MST_CTRL_BURST_1			(2 << 24)
#define   DMA_MST_CTRL_BURST_4			(3 << 24)
#define   DMA_MST_CTRL_BURST_8			(4 << 24)
#define   DMA_MST_CTRL_BURST_16			(5 << 24)
#define   DMA_MST_CTRL_IS_DONE			BIT(20)
#define   DMA_MST_CTRL_EN_A			BIT(2)
#define   DMA_MST_CTRL_EN_B			BIT(1)

#define DMA_CFG_A				0x34
#define DMA_CFG_B				0x38

#define FIFO_CTRL				0x3c
#define   FIFO_CTRL_CLR_ALL			BIT(3)

#define DATA_PTR				0x40
#define TAG_PTR					0x44
#define ECC_PTR					0x48

#define DEC_STATUS				0x4c
#define   DEC_STATUS_A_ECC_FAIL			BIT(1)
#define   DEC_STATUS_ERR_COUNT_MASK		0x00ff0000
#define   DEC_STATUS_ERR_COUNT_SHIFT		16

#define HWSTATUS_CMD				0x50
#define HWSTATUS_MASK				0x54
#define   HWSTATUS_RDSTATUS_MASK(x)		(((x) & 0xff) << 24)
#define   HWSTATUS_RDSTATUS_VALUE(x)		(((x) & 0xff) << 16)
#define   HWSTATUS_RBSY_MASK(x)			(((x) & 0xff) << 8)
#define   HWSTATUS_RBSY_VALUE(x)		(((x) & 0xff) << 0)

#define BCH_CONFIG				0xcc
#define   BCH_ENABLE				BIT(0)
#define   BCH_TVAL_4				(0 << 4)
#define   BCH_TVAL_8				(1 << 4)
#define   BCH_TVAL_14				(2 << 4)
#define   BCH_TVAL_16				(3 << 4)

#define DEC_STAT_RESULT				0xd0
#define DEC_STAT_BUF				0xd4
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_MASK	0xff000000
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT	24
#define   DEC_STAT_BUF_CORR_SEC_FLAG_MASK	0x00ff0000
#define   DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT	16
#define   DEC_STAT_BUF_MAX_CORR_CNT_MASK	0x00001f00
#define   DEC_STAT_BUF_MAX_CORR_CNT_SHIFT	8

#define OFFSET(val, off)	((val) < (off) ? 0 : (val) - (off))

#define SKIP_SPARE_BYTES	4
#define BITS_PER_STEP_RS	18
#define BITS_PER_STEP_BCH	13

#define INT_MASK		(IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
#define HWSTATUS_CMD_DEFAULT	NAND_STATUS_READY
#define HWSTATUS_MASK_DEFAULT	(HWSTATUS_RDSTATUS_MASK(1) | \
				 HWSTATUS_RDSTATUS_VALUE(0) | \
				 HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
				 HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))

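/*
 * Per-controller state (register window, clock, interrupt completions) is
 * kept in tegra_nand_controller; per-chip configuration (precomputed
 * CONFIG/BCH_CONFIG register values, chip select, write-protect GPIO)
 * lives in tegra_nand_chip.
 */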
struct tegra_nand_controller {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct completion command_complete;
	struct completion dma_complete;
	bool last_read_error;
	int cur_cs;
	struct nand_chip *chip;
};

struct tegra_nand_chip {
	struct nand_chip chip;
	struct gpio_desc *wp_gpio;
	struct mtd_oob_region ecc;
	u32 config;
	u32 config_ecc;
	u32 bch_config;
	int cs[1];
};

static inline struct tegra_nand_controller *
to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
	return container_of(hw_ctrl, struct tegra_nand_controller, controller);
}

static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
	return container_of(chip, struct tegra_nand_chip, chip);
}

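/*
 * OOB layout: the first SKIP_SPARE_BYTES bytes are skipped by the
 * controller and the ECC bytes follow directly after, rounded up to a
 * multiple of 4. E.g. RS with strength 4 needs
 * DIV_ROUND_UP(18 * 4, 8) = 9 bytes per 512-byte step; on a 2048-byte
 * page (4 steps) that is round_up(36, 4) = 36 ECC bytes.
 */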
static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	return -ERANGE;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
	.ecc = tegra_nand_ooblayout_rs_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
	.ecc = tegra_nand_ooblayout_bch_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

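/*
 * Interrupt handler: completes command_complete on CMD_DONE and
 * dma_complete when the DMA master signals done, and latches CORRFAIL so
 * that the HW ECC read path can inspect DEC_STAT_BUF afterwards.
 */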
static irqreturn_t tegra_nand_irq(int irq, void *data)
{
	struct tegra_nand_controller *ctrl = data;
	u32 isr, dma;

	isr = readl_relaxed(ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	dev_dbg(ctrl->dev, "isr %08x\n", isr);

	if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
		return IRQ_NONE;

	/*
	 * The bit name is somewhat misleading: this is also set when
	 * HW ECC was successful. The data sheet states:
	 * Correctable OR Un-correctable errors occurred in the DMA transfer...
	 */
	if (isr & ISR_CORRFAIL_ERR)
		ctrl->last_read_error = true;

	if (isr & ISR_CMD_DONE)
		complete(&ctrl->command_complete);

	if (isr & ISR_UND)
		dev_err(ctrl->dev, "FIFO underrun\n");

	if (isr & ISR_OVR)
		dev_err(ctrl->dev, "FIFO overrun\n");

	/* handle DMA interrupts */
	if (dma & DMA_MST_CTRL_IS_DONE) {
		writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
		complete(&ctrl->dma_complete);
	}

	/* clear interrupts */
	writel_relaxed(isr, ctrl->regs + ISR);

	return IRQ_HANDLED;
}

static const char * const tegra_nand_reg_names[] = {
	"COMMAND",
	"STATUS",
	"ISR",
	"IER",
	"CONFIG",
	"TIMING",
	NULL,
	"TIMING2",
	"CMD_REG1",
	"CMD_REG2",
	"ADDR_REG1",
	"ADDR_REG2",
	"DMA_MST_CTRL",
	"DMA_CFG_A",
	"DMA_CFG_B",
	"FIFO_CTRL",
};

static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
{
	u32 reg;
	int i;

	dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
	for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
		const char *reg_name = tegra_nand_reg_names[i];

		if (!reg_name)
			continue;

		reg = readl_relaxed(ctrl->regs + (i * 4));
		dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
	}
}

static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
	u32 isr, dma;

	disable_irq(ctrl->irq);

	/* Abort current command/DMA operation */
	writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(0, ctrl->regs + COMMAND);

	/* clear interrupts */
	isr = readl_relaxed(ctrl->regs + ISR);
	writel_relaxed(isr, ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);

	reinit_completion(&ctrl->command_complete);
	reinit_completion(&ctrl->dma_complete);

	enable_irq(ctrl->irq);
}

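/*
 * Execute one parsed sub-operation in PIO mode: the first command opcode
 * goes to CMD_REG1, an optional second one to CMD_REG2, up to eight
 * address cycles are packed into ADDR_REG1/ADDR_REG2, and at most four
 * data bytes are transferred through the RESP register.
 */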
static int tegra_nand_cmd(struct nand_chip *chip,
			  const struct nand_subop *subop)
{
	const struct nand_op_instr *instr;
	const struct nand_op_instr *instr_data_in = NULL;
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	unsigned int op_id, size = 0, offset = 0;
	bool first_cmd = true;
	u32 reg, cmd = 0;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int naddrs, i;
		const u8 *addrs;
		u32 addr1 = 0, addr2 = 0;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd) {
				cmd |= COMMAND_CLE;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG1);
			} else {
				cmd |= COMMAND_SEC_CMD;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG2);
			}
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr1 |= *addrs++ << (BITS_PER_BYTE * i);
			naddrs -= i;
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr2 |= *addrs++ << (BITS_PER_BYTE * i);

			writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
			writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
			break;

		case NAND_OP_DATA_IN_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
			       COMMAND_RX | COMMAND_A_VALID;

			instr_data_in = instr;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
			       COMMAND_TX | COMMAND_A_VALID;
			memcpy(&reg, instr->ctx.data.buf.out + offset, size);
			writel_relaxed(reg, ctrl->regs + RESP);
			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= COMMAND_RBSY_CHK;
			break;
		}
	}

	cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
	writel_relaxed(cmd, ctrl->regs + COMMAND);

	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		return -ETIMEDOUT;
	}

	if (instr_data_in) {
		reg = readl_relaxed(ctrl->regs + RESP);
		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
	}

	return 0;
}

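/*
 * The parser patterns mirror the PIO limitations of tegra_nand_cmd():
 * at most eight address cycles (two 32-bit address registers) and at
 * most four data bytes per sub-operation (one 32-bit RESP register).
 */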
static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
	);

static void tegra_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);

	ctrl->cur_cs = nand->cs[die_nr];
}

static int tegra_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	if (!check_only)
		tegra_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
				      check_only);
}

static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
			      struct nand_chip *chip, bool enable)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);

	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
		writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
	else
		writel_relaxed(0, ctrl->regs + BCH_CONFIG);

	if (enable)
		writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
	else
		writel_relaxed(nand->config, ctrl->regs + CONFIG);
}

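/*
 * DMA-based full page transfer: programs READ0/READSTART or
 * SEQIN/PAGEPROG, maps the main data buffer to DMA buffer A and the OOB
 * buffer to DMA buffer B, then waits for both the command and the DMA
 * completion before unmapping the buffers.
 */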
static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
				void *buf, void *oob_buf, int oob_len, int page,
				bool read)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t dma_addr = 0, dma_addr_oob = 0;
	u32 addr1, cmd, dma_ctrl;
	int ret;

	tegra_nand_select_target(chip, chip->cur_cs);

	if (read) {
		writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
	} else {
		writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
	}
	cmd = COMMAND_CLE | COMMAND_SEC_CMD;

	/* The lower 16 bits hold the column address, 0 by default */
	addr1 = page << 16;

	if (!buf)
		addr1 |= mtd->writesize;
	writel_relaxed(addr1, ctrl->regs + ADDR_REG1);

	if (chip->options & NAND_ROW_ADDR_3) {
		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
	} else {
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
	}

	if (buf) {
		dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			return -EINVAL;
		}

		writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
		writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
	}

	if (oob_buf) {
		dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
					      dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			ret = -EINVAL;
			goto err_unmap_dma_page;
		}

		writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
		writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
	}

	dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
		   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
		   DMA_MST_CTRL_BURST_16;

	if (buf)
		dma_ctrl |= DMA_MST_CTRL_EN_A;
	if (oob_buf)
		dma_ctrl |= DMA_MST_CTRL_EN_B;

	if (read)
		dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
	else
		dma_ctrl |= DMA_MST_CTRL_OUT;

	writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);

	cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
	       COMMAND_CE(ctrl->cur_cs);

	if (buf)
		cmd |= COMMAND_A_VALID;
	if (oob_buf)
		cmd |= COMMAND_B_VALID;

	if (read)
		cmd |= COMMAND_RX;
	else
		cmd |= COMMAND_TX | COMMAND_AFT_DAT;

	writel_relaxed(cmd, ctrl->regs + COMMAND);

	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}

	ret = wait_for_completion_timeout(&ctrl->dma_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "DMA timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}
	ret = 0;

err_unmap_dma:
	if (oob_buf)
		dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
	if (buf)
		dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);

	return ret;
}

static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
				     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				    mtd->oobsize, page, false);
}

static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, false);
}

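/*
 * HW ECC read path: transfer the page with ECC enabled, then evaluate
 * DEC_STAT_BUF. Sectors that failed to correct are re-checked with
 * nand_check_erased_ecc_chunk() to tell genuinely uncorrectable data
 * apart from erased pages.
 */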
static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	u32 dec_stat, max_corr_cnt;
	unsigned long fail_sec_flag;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
	tegra_nand_hw_ecc(ctrl, chip, false);
	if (ret)
		return ret;

	/* No correctable or un-correctable errors, page must have 0 bitflips */
	if (!ctrl->last_read_error)
		return 0;

	/*
	 * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
	 * which contains information for all ECC selections.
	 *
	 * Note that since we do not use Command Queues DEC_RESULT does not
	 * state the number of pages we can read from the DEC_STAT_BUF. But
	 * since CORRFAIL_ERR did occur during page read we do have a valid
	 * result in DEC_STAT_BUF.
	 */
	ctrl->last_read_error = false;
	dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);

	fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
			DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;

	max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
		       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;

	if (fail_sec_flag) {
		int bit, max_bitflips = 0;

		/*
		 * Since we do not support subpage writes, a complete page
		 * is either written or not. We can take a shortcut here by
		 * checking whether any of the sectors has been read
		 * successfully: if at least one sector has been read
		 * successfully, the page must have been written previously.
		 * It cannot be an erased page.
		 *
		 * E.g. the controller might return fail_sec_flag with 0x4,
		 * which would mean only the third sector failed to correct.
		 * The page must have been written and the third sector is
		 * really not correctable anymore.
		 */
		if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
			mtd->ecc_stats.failed += hweight8(fail_sec_flag);
			return max_corr_cnt;
		}

		/*
		 * All sectors failed to correct, but the ECC isn't smart
		 * enough to figure out if a page is really just erased.
		 * Read OOB data and check whether data/OOB is completely
		 * erased or if error correction just failed for all sub-
		 * pages.
		 */
		ret = tegra_nand_read_oob(chip, page);
		if (ret < 0)
			return ret;

		for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
			u8 *data = buf + (chip->ecc.size * bit);
			u8 *oob = chip->oob_poi + nand->ecc.offset +
				  (chip->ecc.bytes * bit);

			ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
							  oob, chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);
			if (ret < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += ret;
				max_bitflips = max(ret, max_bitflips);
			}
		}

		return max_t(unsigned int, max_corr_cnt, max_bitflips);
	} else {
		int corr_sec_flag;

		corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
				DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;

		/*
		 * The value returned in the register is the maximum of
		 * bitflips encountered in any of the ECC regions. As there is
		 * no way to get the number of bitflips in a specific region,
		 * we are not able to deliver correct stats but instead
		 * overestimate the number of corrected bitflips by assuming
		 * that all regions where errors have been corrected
		 * encountered the maximum number of bitflips.
		 */
		mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);

		return max_corr_cnt;
	}
}

static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				   0, page, false);
	tegra_nand_hw_ecc(ctrl, chip, false);

	return ret;
}

static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
				    const struct nand_sdr_timings *timings)
{
	/*
	 * The period (and all other timings in this function) is in ps,
	 * so we need to take care here to avoid integer overflows.
	 */
	unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
	unsigned int period = DIV_ROUND_UP(1000000, rate);
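	/*
	 * Example (assuming a 150 MHz controller clock): rate = 150 and
	 * period = DIV_ROUND_UP(1000000, 150) = 6667 ps per clock cycle,
	 * so a 20 ns (20000 ps) constraint rounds up to 3 cycles.
	 */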
	u32 val, reg = 0;

	val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
				timings->tRC_min), period);
	reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));

	val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
			       max(timings->tALS_min, timings->tALH_min)),
			   period);
	reg |= TIMING_TCS(OFFSET(val, 2));

	val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
			   period);
	reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));

	reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
	reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
	reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
	reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
	reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));

	writel_relaxed(reg, ctrl->regs + TIMING_1);

	val = DIV_ROUND_UP(timings->tADL_min, period);
	reg = TIMING_TADL(OFFSET(val, 3));

	writel_relaxed(reg, ctrl->regs + TIMING_2);
}

static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
				      const struct nand_interface_config *conf)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_sdr_timings *timings;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	tegra_nand_setup_timing(ctrl, timings);

	return 0;
}

static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };

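/*
 * Pick the weakest strength from the table that both satisfies the chip's
 * ECC requirement and whose ECC bytes still fit into the OOB area (minus
 * the skipped spare bytes); when maximizing, walk the table backwards and
 * take the strongest setting that fits.
 */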
static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
				   int strength_len, int bits_per_step,
				   int oobsize)
{
	struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(base);
	bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
	int i;

	/*
	 * Loop through available strengths. Backwards in case we try to
	 * maximize the BCH strength.
	 */
	for (i = 0; i < strength_len; i++) {
		int strength_sel, bytes_per_step, bytes_per_page;

		if (maximize) {
			strength_sel = strength[strength_len - i - 1];
		} else {
			strength_sel = strength[i];

			if (strength_sel < requirements->strength)
				continue;
		}

		bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
					      BITS_PER_BYTE);
		bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);

		/* Check whether strength fits OOB */
		if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
			return strength_sel;
	}

	return -EINVAL;
}

static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
{
	const int *strength;
	int strength_len, bits_per_step;

	switch (chip->ecc.algo) {
	case NAND_ECC_ALGO_RS:
		bits_per_step = BITS_PER_STEP_RS;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = rs_strength_bootable;
			strength_len = ARRAY_SIZE(rs_strength_bootable);
		} else {
			strength = rs_strength;
			strength_len = ARRAY_SIZE(rs_strength);
		}
		break;
	case NAND_ECC_ALGO_BCH:
		bits_per_step = BITS_PER_STEP_BCH;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = bch_strength_bootable;
			strength_len = ARRAY_SIZE(bch_strength_bootable);
		} else {
			strength = bch_strength;
			strength_len = ARRAY_SIZE(bch_strength);
		}
		break;
	default:
		return -EINVAL;
	}

	return tegra_nand_get_strength(chip, strength, strength_len,
				       bits_per_step, oobsize);
}

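/*
 * attach_chip hook: fixes the ECC step size at 512 bytes, defaults to RS
 * for small-page and BCH for large-page devices, selects a strength that
 * fits the OOB area, and precomputes the CONFIG/BCH_CONFIG register
 * values used by the raw and HW ECC paths.
 */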
static int tegra_nand_attach_chip(struct nand_chip *chip)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bits_per_step;
	int ret;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	chip->ecc.size = 512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	if (requirements->step_size != 512) {
		dev_err(ctrl->dev, "Unsupported step size %d\n",
			requirements->step_size);
		return -EINVAL;
	}

	chip->ecc.read_page = tegra_nand_read_page_hwecc;
	chip->ecc.write_page = tegra_nand_write_page_hwecc;
	chip->ecc.read_page_raw = tegra_nand_read_page_raw;
	chip->ecc.write_page_raw = tegra_nand_write_page_raw;
	chip->ecc.read_oob = tegra_nand_read_oob;
	chip->ecc.write_oob = tegra_nand_write_oob;

	if (chip->options & NAND_BUSWIDTH_16)
		nand->config |= CONFIG_BUS_WIDTH_16;

	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
		if (mtd->writesize < 2048)
			chip->ecc.algo = NAND_ECC_ALGO_RS;
		else
			chip->ecc.algo = NAND_ECC_ALGO_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
		dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
		return -EINVAL;
	}

	if (!chip->ecc.strength) {
		ret = tegra_nand_select_strength(chip, mtd->oobsize);
		if (ret < 0) {
			dev_err(ctrl->dev,
				"No valid strength found, minimum %d\n",
				requirements->strength);
			return ret;
		}

		chip->ecc.strength = ret;
	}

	nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
			   CONFIG_SKIP_SPARE_SIZE_4;

	switch (chip->ecc.algo) {
	case NAND_ECC_ALGO_RS:
		bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
		nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
				    CONFIG_ERR_COR;
		switch (chip->ecc.strength) {
		case 4:
			nand->config_ecc |= CONFIG_TVAL_4;
			break;
		case 6:
			nand->config_ecc |= CONFIG_TVAL_6;
			break;
		case 8:
			nand->config_ecc |= CONFIG_TVAL_8;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	case NAND_ECC_ALGO_BCH:
		bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
		nand->bch_config = BCH_ENABLE;
		switch (chip->ecc.strength) {
		case 4:
			nand->bch_config |= BCH_TVAL_4;
			break;
		case 8:
			nand->bch_config |= BCH_TVAL_8;
			break;
		case 14:
			nand->bch_config |= BCH_TVAL_14;
			break;
		case 16:
			nand->bch_config |= BCH_TVAL_16;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	default:
		dev_err(ctrl->dev, "ECC algorithm not supported\n");
		return -EINVAL;
	}

	dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
		 chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
		 chip->ecc.strength);

	chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);

	switch (mtd->writesize) {
	case 256:
		nand->config |= CONFIG_PS_256;
		break;
	case 512:
		nand->config |= CONFIG_PS_512;
		break;
	case 1024:
		nand->config |= CONFIG_PS_1024;
		break;
	case 2048:
		nand->config |= CONFIG_PS_2048;
		break;
	case 4096:
		nand->config |= CONFIG_PS_4096;
		break;
	default:
		dev_err(ctrl->dev, "Unsupported writesize %d\n",
			mtd->writesize);
		return -ENODEV;
	}

	/* Store complete configuration for HW ECC in config_ecc */
	nand->config_ecc |= nand->config;

	/* Non-HW ECC read/writes complete OOB */
	nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
	writel_relaxed(nand->config, ctrl->regs + CONFIG);

	return 0;
}

static const struct nand_controller_ops tegra_nand_controller_ops = {
	.attach_chip = &tegra_nand_attach_chip,
	.exec_op = tegra_nand_exec_op,
	.setup_interface = tegra_nand_setup_interface,
};

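/*
 * Parse the (currently single) NAND chip described in the device tree:
 * read its chip select from the "reg" property, claim the optional WP
 * GPIO, then run nand_scan() and register the resulting MTD device.
 */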
static int tegra_nand_chips_init(struct device *dev,
				 struct tegra_nand_controller *ctrl)
{
	struct device_node *np = dev->of_node;
	struct device_node *np_nand;
	int nsels, nchips = of_get_child_count(np);
	struct tegra_nand_chip *nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u32 cs;

	if (nchips != 1) {
		dev_err(dev, "Currently only one NAND chip supported\n");
		return -EINVAL;
	}

	np_nand = of_get_next_child(np, NULL);

	nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
	if (nsels != 1) {
		dev_err(dev, "Missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Retrieve CS id, currently only single die NAND supported */
	ret = of_property_read_u32(np_nand, "reg", &cs);
	if (ret) {
		dev_err(dev, "could not retrieve reg property: %d\n", ret);
		return ret;
	}

	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->cs[0] = cs;

	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
	if (IS_ERR(nand->wp_gpio)) {
		ret = PTR_ERR(nand->wp_gpio);
		dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
		return ret;
	}

	chip = &nand->chip;
	chip->controller = &ctrl->controller;

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	nand_set_flash_node(chip, np_nand);

	if (!mtd->name)
		mtd->name = "tegra_nand";

	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	mtd_ooblayout_ecc(mtd, 0, &nand->ecc);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "Failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	ctrl->chip = chip;

	return 0;
}

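/*
 * Probe: map the register window, reset the block, enable the clock via
 * runtime PM (the device is kept resumed, see the comment below), set up
 * the default HWSTATUS/interrupt masks, request the IRQ and finally scan
 * for chips.
 */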
static int tegra_nand_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	struct tegra_nand_controller *ctrl;
	int err = 0;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctrl);
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &tegra_nand_controller_ops;

	ctrl->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctrl->regs))
		return PTR_ERR(ctrl->regs);

	rst = devm_reset_control_get(&pdev->dev, "nand");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ctrl->clk = devm_clk_get(&pdev->dev, "nand");
	if (IS_ERR(ctrl->clk))
		return PTR_ERR(ctrl->clk);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		return err;

	/*
	 * This driver doesn't support active power management yet,
	 * so we will simply keep device resumed.
	 */
	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto err_dis_pm;

	err = reset_control_reset(rst);
	if (err) {
		dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
		goto err_put_pm;
	}

	writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
	writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
	writel_relaxed(INT_MASK, ctrl->regs + IER);

	init_completion(&ctrl->command_complete);
	init_completion(&ctrl->dma_complete);

	ctrl->irq = platform_get_irq(pdev, 0);
	if (ctrl->irq < 0) {
		err = ctrl->irq;
		goto err_put_pm;
	}
	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
			       dev_name(&pdev->dev), ctrl);
	if (err) {
		dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
		goto err_put_pm;
	}

	writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);

	err = tegra_nand_chips_init(ctrl->dev, ctrl);
	if (err)
		goto err_put_pm;

	return 0;

err_put_pm:
	pm_runtime_put_sync_suspend(ctrl->dev);
	pm_runtime_force_suspend(ctrl->dev);
err_dis_pm:
	pm_runtime_disable(&pdev->dev);
	return err;
}

static int tegra_nand_remove(struct platform_device *pdev)
{
	struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
	struct nand_chip *chip = ctrl->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	WARN_ON(mtd_device_unregister(mtd));

	nand_cleanup(chip);

	pm_runtime_put_sync_suspend(ctrl->dev);
	pm_runtime_force_suspend(ctrl->dev);

	return 0;
}

static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
{
	struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(ctrl->clk);
	if (err) {
		dev_err(dev, "Failed to enable clock: %d\n", err);
		return err;
	}

	return 0;
}

static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
{
	struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);

	clk_disable_unprepare(ctrl->clk);

	return 0;
}

static const struct dev_pm_ops tegra_nand_pm = {
	SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
			   NULL)
};

static const struct of_device_id tegra_nand_of_match[] = {
	{ .compatible = "nvidia,tegra20-nand" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);

static struct platform_driver tegra_nand_driver = {
	.driver = {
		.name = "tegra-nand",
		.of_match_table = tegra_nand_of_match,
		.pm = &tegra_nand_pm,
	},
	.probe = tegra_nand_probe,
	.remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);

MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_AUTHOR("Lucas Stach <[email protected]>");
MODULE_AUTHOR("Stefan Agner <[email protected]>");
MODULE_LICENSE("GPL v2");