renesas-nand-controller.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Evatronix/Renesas R-Car Gen3, RZ/N1D, RZ/N1S, RZ/N1L NAND controller driver
 *
 * Copyright (C) 2021 Schneider Electric
 * Author: Miquel Raynal <[email protected]>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

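/*
 * Register map of the NAND controller. Offsets are relative to the base
 * address mapped at probe time; bitfields are encoded with FIELD_PREP()
 * over GENMASK() ranges.
 */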
#define COMMAND_REG 0x00
#define COMMAND_SEQ(x) FIELD_PREP(GENMASK(5, 0), (x))
#define COMMAND_SEQ_10 COMMAND_SEQ(0x2A)
#define COMMAND_SEQ_12 COMMAND_SEQ(0x0C)
#define COMMAND_SEQ_18 COMMAND_SEQ(0x32)
#define COMMAND_SEQ_19 COMMAND_SEQ(0x13)
#define COMMAND_SEQ_GEN_IN COMMAND_SEQ_18
#define COMMAND_SEQ_GEN_OUT COMMAND_SEQ_19
#define COMMAND_SEQ_READ_PAGE COMMAND_SEQ_10
#define COMMAND_SEQ_WRITE_PAGE COMMAND_SEQ_12
#define COMMAND_INPUT_SEL_AHBS 0
#define COMMAND_INPUT_SEL_DMA BIT(6)
#define COMMAND_FIFO_SEL 0
#define COMMAND_DATA_SEL BIT(7)
#define COMMAND_0(x) FIELD_PREP(GENMASK(15, 8), (x))
#define COMMAND_1(x) FIELD_PREP(GENMASK(23, 16), (x))
#define COMMAND_2(x) FIELD_PREP(GENMASK(31, 24), (x))

#define CONTROL_REG 0x04
#define CONTROL_CHECK_RB_LINE 0
#define CONTROL_ECC_BLOCK_SIZE(x) FIELD_PREP(GENMASK(2, 1), (x))
#define CONTROL_ECC_BLOCK_SIZE_256 CONTROL_ECC_BLOCK_SIZE(0)
#define CONTROL_ECC_BLOCK_SIZE_512 CONTROL_ECC_BLOCK_SIZE(1)
#define CONTROL_ECC_BLOCK_SIZE_1024 CONTROL_ECC_BLOCK_SIZE(2)
#define CONTROL_INT_EN BIT(4)
#define CONTROL_ECC_EN BIT(5)
#define CONTROL_BLOCK_SIZE(x) FIELD_PREP(GENMASK(7, 6), (x))
#define CONTROL_BLOCK_SIZE_32P CONTROL_BLOCK_SIZE(0)
#define CONTROL_BLOCK_SIZE_64P CONTROL_BLOCK_SIZE(1)
#define CONTROL_BLOCK_SIZE_128P CONTROL_BLOCK_SIZE(2)
#define CONTROL_BLOCK_SIZE_256P CONTROL_BLOCK_SIZE(3)

#define STATUS_REG 0x8
#define MEM_RDY(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define CTRL_RDY(reg) (FIELD_GET(BIT(8), (reg)) == 0)

#define ECC_CTRL_REG 0x18
#define ECC_CTRL_CAP(x) FIELD_PREP(GENMASK(2, 0), (x))
#define ECC_CTRL_CAP_2B ECC_CTRL_CAP(0)
#define ECC_CTRL_CAP_4B ECC_CTRL_CAP(1)
#define ECC_CTRL_CAP_8B ECC_CTRL_CAP(2)
#define ECC_CTRL_CAP_16B ECC_CTRL_CAP(3)
#define ECC_CTRL_CAP_24B ECC_CTRL_CAP(4)
#define ECC_CTRL_CAP_32B ECC_CTRL_CAP(5)
#define ECC_CTRL_ERR_THRESHOLD(x) FIELD_PREP(GENMASK(13, 8), (x))

#define INT_MASK_REG 0x10
#define INT_STATUS_REG 0x14
#define INT_CMD_END BIT(1)
#define INT_DMA_END BIT(3)
#define INT_MEM_RDY(cs) FIELD_PREP(GENMASK(11, 8), BIT(cs))
#define INT_DMA_ENDED BIT(3)
#define MEM_IS_RDY(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
#define DMA_HAS_ENDED(reg) FIELD_GET(BIT(3), (reg))

#define ECC_OFFSET_REG 0x1C
#define ECC_OFFSET(x) FIELD_PREP(GENMASK(15, 0), (x))

#define ECC_STAT_REG 0x20
#define ECC_STAT_CORRECTABLE(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define ECC_STAT_UNCORRECTABLE(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))

#define ADDR0_COL_REG 0x24
#define ADDR0_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ADDR0_ROW_REG 0x28
#define ADDR0_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
#define ADDR1_COL_REG 0x2C
#define ADDR1_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ADDR1_ROW_REG 0x30
#define ADDR1_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))

#define FIFO_DATA_REG 0x38
#define DATA_REG 0x3C
#define DATA_REG_SIZE_REG 0x40

#define DMA_ADDR_LOW_REG 0x64
#define DMA_ADDR_HIGH_REG 0x68
#define DMA_CNT_REG 0x6C
#define DMA_CTRL_REG 0x70
#define DMA_CTRL_INCREMENT_BURST_4 0
#define DMA_CTRL_REGISTER_MANAGED_MODE 0
#define DMA_CTRL_START BIT(7)

#define MEM_CTRL_REG 0x80
#define MEM_CTRL_CS(cs) FIELD_PREP(GENMASK(1, 0), (cs))
#define MEM_CTRL_DIS_WP(cs) FIELD_PREP(GENMASK(11, 8), BIT((cs)))

#define DATA_SIZE_REG 0x84
#define DATA_SIZE(x) FIELD_PREP(GENMASK(14, 0), (x))

#define TIMINGS_ASYN_REG 0x88
#define TIMINGS_ASYN_TRWP(x) FIELD_PREP(GENMASK(3, 0), max((x), 1U) - 1)
#define TIMINGS_ASYN_TRWH(x) FIELD_PREP(GENMASK(7, 4), max((x), 1U) - 1)

#define TIM_SEQ0_REG 0x90
#define TIM_SEQ0_TCCS(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_SEQ0_TADL(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_SEQ0_TRHW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_SEQ0_TWHR(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_SEQ1_REG 0x94
#define TIM_SEQ1_TWB(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_SEQ1_TRR(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_SEQ1_TWW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)

#define TIM_GEN_SEQ0_REG 0x98
#define TIM_GEN_SEQ0_D0(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D1(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D2(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D3(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_GEN_SEQ1_REG 0x9C
#define TIM_GEN_SEQ1_D4(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D5(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D6(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D7(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_GEN_SEQ2_REG 0xA0
#define TIM_GEN_SEQ2_D8(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D9(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D10(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D11(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define FIFO_INIT_REG 0xB4
#define FIFO_INIT BIT(0)

#define FIFO_STATE_REG 0xB4
#define FIFO_STATE_R_EMPTY(reg) FIELD_GET(BIT(0), (reg))
#define FIFO_STATE_W_FULL(reg) FIELD_GET(BIT(1), (reg))
#define FIFO_STATE_C_EMPTY(reg) FIELD_GET(BIT(2), (reg))
#define FIFO_STATE_R_FULL(reg) FIELD_GET(BIT(6), (reg))
#define FIFO_STATE_W_EMPTY(reg) FIELD_GET(BIT(7), (reg))

#define GEN_SEQ_CTRL_REG 0xB8
#define GEN_SEQ_CMD0_EN BIT(0)
#define GEN_SEQ_CMD1_EN BIT(1)
#define GEN_SEQ_CMD2_EN BIT(2)
#define GEN_SEQ_CMD3_EN BIT(3)
#define GEN_SEQ_COL_A0(x) FIELD_PREP(GENMASK(5, 4), min((x), 2U))
#define GEN_SEQ_COL_A1(x) FIELD_PREP(GENMASK(7, 6), min((x), 2U))
#define GEN_SEQ_ROW_A0(x) FIELD_PREP(GENMASK(9, 8), min((x), 3U))
#define GEN_SEQ_ROW_A1(x) FIELD_PREP(GENMASK(11, 10), min((x), 3U))
#define GEN_SEQ_DATA_EN BIT(12)
#define GEN_SEQ_DELAY_EN(x) FIELD_PREP(GENMASK(14, 13), (x))
#define GEN_SEQ_DELAY0_EN GEN_SEQ_DELAY_EN(1)
#define GEN_SEQ_DELAY1_EN GEN_SEQ_DELAY_EN(2)
#define GEN_SEQ_IMD_SEQ BIT(15)
#define GEN_SEQ_COMMAND_3(x) FIELD_PREP(GENMASK(26, 16), (x))

#define DMA_TLVL_REG 0x114
#define DMA_TLVL(x) FIELD_PREP(GENMASK(7, 0), (x))
#define DMA_TLVL_MAX DMA_TLVL(0xFF)

#define TIM_GEN_SEQ3_REG 0x134
#define TIM_GEN_SEQ3_D12(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)

#define ECC_CNT_REG 0x14C
#define ECC_CNT(cs, reg) FIELD_GET(GENMASK(5, 0), (reg) >> ((cs) * 8))

#define RNANDC_CS_NUM 4

#define TO_CYCLES64(ps, period_ns) ((unsigned int)DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
								   period_ns))

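/*
 * Per-chip state: a NAND chip is described by one or more native chip
 * selects (struct rnand_chip_sel) plus a shadow copy of every
 * configuration and timing register, so the controller can be fully
 * reprogrammed whenever the selected target changes.
 */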
struct rnand_chip_sel {
	unsigned int cs;
};

struct rnand_chip {
	struct nand_chip chip;
	struct list_head node;
	int selected_die;
	u32 ctrl;
	unsigned int nsels;
	u32 control;
	u32 ecc_ctrl;
	u32 timings_asyn;
	u32 tim_seq0;
	u32 tim_seq1;
	u32 tim_gen_seq0;
	u32 tim_gen_seq1;
	u32 tim_gen_seq2;
	u32 tim_gen_seq3;
	struct rnand_chip_sel sels[];
};

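/*
 * Controller-wide state, shared by all the chips hanging off the same
 * controller instance.
 */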
struct rnandc {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	unsigned long ext_clk_rate;
	unsigned long assigned_cs;
	struct list_head chips;
	struct nand_chip *selected_chip;
	struct completion complete;
	bool use_polling;
	u8 *buf;
	unsigned int buf_sz;
};

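/*
 * Snapshot of the register values describing a single operation, filled
 * in by the callers and programmed into the hardware by
 * rnandc_trigger_op().
 */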
struct rnandc_op {
	u32 command;
	u32 addr0_col;
	u32 addr0_row;
	u32 addr1_col;
	u32 addr1_row;
	u32 data_size;
	u32 ecc_offset;
	u32 gen_seq_ctrl;
	u8 *buf;
	bool read;
	unsigned int len;
};

static inline struct rnandc *to_rnandc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct rnandc, controller);
}

static inline struct rnand_chip *to_rnand(struct nand_chip *chip)
{
	return container_of(chip, struct rnand_chip, chip);
}

static inline unsigned int to_rnandc_cs(struct rnand_chip *nand)
{
	return nand->sels[nand->selected_die].cs;
}

static void rnandc_dis_correction(struct rnandc *rnandc)
{
	u32 control;

	control = readl_relaxed(rnandc->regs + CONTROL_REG);
	control &= ~CONTROL_ECC_EN;
	writel_relaxed(control, rnandc->regs + CONTROL_REG);
}

static void rnandc_en_correction(struct rnandc *rnandc)
{
	u32 control;

	control = readl_relaxed(rnandc->regs + CONTROL_REG);
	control |= CONTROL_ECC_EN;
	writel_relaxed(control, rnandc->regs + CONTROL_REG);
}

static void rnandc_clear_status(struct rnandc *rnandc)
{
	writel_relaxed(0, rnandc->regs + INT_STATUS_REG);
	writel_relaxed(0, rnandc->regs + ECC_STAT_REG);
	writel_relaxed(0, rnandc->regs + ECC_CNT_REG);
}

static void rnandc_dis_interrupts(struct rnandc *rnandc)
{
	writel_relaxed(0, rnandc->regs + INT_MASK_REG);
}

static void rnandc_en_interrupts(struct rnandc *rnandc, u32 val)
{
	if (!rnandc->use_polling)
		writel_relaxed(val, rnandc->regs + INT_MASK_REG);
}

static void rnandc_clear_fifo(struct rnandc *rnandc)
{
	writel_relaxed(FIFO_INIT, rnandc->regs + FIFO_INIT_REG);
}

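/*
 * Select a target: the configuration and timing registers are only
 * reprogrammed when the chip or die actually changes, as the full
 * register reload is fairly costly.
 */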
static void rnandc_select_target(struct nand_chip *chip, int die_nr)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);
	unsigned int cs = rnand->sels[die_nr].cs;

	if (chip == rnandc->selected_chip && die_nr == rnand->selected_die)
		return;

	rnandc_clear_status(rnandc);
	writel_relaxed(MEM_CTRL_CS(cs) | MEM_CTRL_DIS_WP(cs), rnandc->regs + MEM_CTRL_REG);
	writel_relaxed(rnand->control, rnandc->regs + CONTROL_REG);
	writel_relaxed(rnand->ecc_ctrl, rnandc->regs + ECC_CTRL_REG);
	writel_relaxed(rnand->timings_asyn, rnandc->regs + TIMINGS_ASYN_REG);
	writel_relaxed(rnand->tim_seq0, rnandc->regs + TIM_SEQ0_REG);
	writel_relaxed(rnand->tim_seq1, rnandc->regs + TIM_SEQ1_REG);
	writel_relaxed(rnand->tim_gen_seq0, rnandc->regs + TIM_GEN_SEQ0_REG);
	writel_relaxed(rnand->tim_gen_seq1, rnandc->regs + TIM_GEN_SEQ1_REG);
	writel_relaxed(rnand->tim_gen_seq2, rnandc->regs + TIM_GEN_SEQ2_REG);
	writel_relaxed(rnand->tim_gen_seq3, rnandc->regs + TIM_GEN_SEQ3_REG);

	rnandc->selected_chip = chip;
	rnand->selected_die = die_nr;
}

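/*
 * Program all the per-operation registers; the final write to
 * COMMAND_REG is what actually starts the sequence.
 */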
static void rnandc_trigger_op(struct rnandc *rnandc, struct rnandc_op *rop)
{
	writel_relaxed(rop->addr0_col, rnandc->regs + ADDR0_COL_REG);
	writel_relaxed(rop->addr0_row, rnandc->regs + ADDR0_ROW_REG);
	writel_relaxed(rop->addr1_col, rnandc->regs + ADDR1_COL_REG);
	writel_relaxed(rop->addr1_row, rnandc->regs + ADDR1_ROW_REG);
	writel_relaxed(rop->ecc_offset, rnandc->regs + ECC_OFFSET_REG);
	writel_relaxed(rop->gen_seq_ctrl, rnandc->regs + GEN_SEQ_CTRL_REG);
	writel_relaxed(DATA_SIZE(rop->len), rnandc->regs + DATA_SIZE_REG);
	writel_relaxed(rop->command, rnandc->regs + COMMAND_REG);
}

static void rnandc_trigger_dma(struct rnandc *rnandc)
{
	writel_relaxed(DMA_CTRL_INCREMENT_BURST_4 |
		       DMA_CTRL_REGISTER_MANAGED_MODE |
		       DMA_CTRL_START, rnandc->regs + DMA_CTRL_REG);
}

static irqreturn_t rnandc_irq_handler(int irq, void *private)
{
	struct rnandc *rnandc = private;

	rnandc_dis_interrupts(rnandc);
	complete(&rnandc->complete);

	return IRQ_HANDLED;
}

static int rnandc_wait_end_of_op(struct rnandc *rnandc,
				 struct nand_chip *chip)
{
	struct rnand_chip *rnand = to_rnand(chip);
	unsigned int cs = to_rnandc_cs(rnand);
	u32 status;
	int ret;

	ret = readl_poll_timeout(rnandc->regs + STATUS_REG, status,
				 MEM_RDY(cs, status) && CTRL_RDY(status),
				 1, 100000);
	if (ret)
		dev_err(rnandc->dev, "Operation timed out, status: 0x%08x\n",
			status);

	return ret;
}

static int rnandc_wait_end_of_io(struct rnandc *rnandc,
				 struct nand_chip *chip)
{
	int timeout_ms = 1000;
	int ret;

	if (rnandc->use_polling) {
		struct rnand_chip *rnand = to_rnand(chip);
		unsigned int cs = to_rnandc_cs(rnand);
		u32 status;

		ret = readl_poll_timeout(rnandc->regs + INT_STATUS_REG, status,
					 MEM_IS_RDY(cs, status) &&
					 DMA_HAS_ENDED(status),
					 0, timeout_ms * 1000);
	} else {
		ret = wait_for_completion_timeout(&rnandc->complete,
						  msecs_to_jiffies(timeout_ms));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			ret = 0;
	}

	return ret;
}

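/*
 * Full-page read with on-host ECC: the data goes through the DMA bounce
 * buffer, then the correction status is checked. In the uncorrectable
 * case the OOB area is re-read so that nand_check_erased_ecc_chunk() can
 * tell erased pages apart from genuinely corrupted ones.
 */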
static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
				   int oob_required, int page)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct rnand_chip *rnand = to_rnand(chip);
	unsigned int cs = to_rnandc_cs(rnand);
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_READ0) |
			   COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
			   COMMAND_SEQ_READ_PAGE,
		.addr0_row = page,
		.len = mtd->writesize,
		.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
	};
	unsigned int max_bitflips = 0;
	dma_addr_t dma_addr;
	u32 ecc_stat;
	int bf, ret, i;

	/* Prepare controller */
	rnandc_select_target(chip, chip->cur_cs);
	rnandc_clear_status(rnandc);
	reinit_completion(&rnandc->complete);
	rnandc_en_interrupts(rnandc, INT_DMA_ENDED);
	rnandc_en_correction(rnandc);

	/* Configure DMA */
	dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
				  DMA_FROM_DEVICE);
	writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
	writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
	writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);

	rnandc_trigger_op(rnandc, &rop);
	rnandc_trigger_dma(rnandc);

	ret = rnandc_wait_end_of_io(rnandc, chip);
	dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_FROM_DEVICE);
	rnandc_dis_correction(rnandc);
	if (ret) {
		dev_err(rnandc->dev, "Read page operation never ending\n");
		return ret;
	}

	ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);

	if (oob_required || ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	}

	if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
		for (i = 0; i < chip->ecc.steps; i++) {
			unsigned int off = i * chip->ecc.size;
			unsigned int eccoff = i * chip->ecc.bytes;

			bf = nand_check_erased_ecc_chunk(rnandc->buf + off,
							 chip->ecc.size,
							 chip->oob_poi + 2 + eccoff,
							 chip->ecc.bytes,
							 NULL, 0,
							 chip->ecc.strength);
			if (bf < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += bf;
				max_bitflips = max_t(unsigned int, max_bitflips, bf);
			}
		}
	} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
		bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
		/*
		 * The number of bitflips is an approximation given the fact
		 * that this controller does not provide per-chunk details but
		 * only gives statistics on the entire page.
		 */
		mtd->ecc_stats.corrected += bf;
		max_bitflips = max_t(unsigned int, max_bitflips, bf);
	}

	memcpy(buf, rnandc->buf, mtd->writesize);

	return max_bitflips;
}

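/*
 * Subpage read: same ECC handling as full-page reads, but the data is
 * moved by the CPU through the controller FIFO (AHBS mode) and only the
 * ECC chunks covering the requested area are transferred.
 */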
static int rnandc_read_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
				      u32 req_len, u8 *bufpoi, int page)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct rnand_chip *rnand = to_rnand(chip);
	unsigned int cs = to_rnandc_cs(rnand);
	unsigned int page_off = round_down(req_offset, chip->ecc.size);
	unsigned int real_len = round_up(req_offset + req_len - page_off,
					 chip->ecc.size);
	unsigned int start_chunk = page_off / chip->ecc.size;
	unsigned int nchunks = real_len / chip->ecc.size;
	unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_READ0) |
			   COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
			   COMMAND_SEQ_READ_PAGE,
		.addr0_row = page,
		.addr0_col = page_off,
		.len = real_len,
		.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
	};
	unsigned int max_bitflips = 0, i;
	u32 ecc_stat;
	int bf, ret;

	/* Prepare controller */
	rnandc_select_target(chip, chip->cur_cs);
	rnandc_clear_status(rnandc);
	rnandc_en_correction(rnandc);
	rnandc_trigger_op(rnandc, &rop);

	while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
		cpu_relax();

	while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
		cpu_relax();

	ioread32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
		     real_len / 4);

	if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
		dev_err(rnandc->dev, "Clearing residual data in the read FIFO\n");
		rnandc_clear_fifo(rnandc);
	}

	ret = rnandc_wait_end_of_op(rnandc, chip);
	rnandc_dis_correction(rnandc);
	if (ret) {
		dev_err(rnandc->dev, "Read subpage operation never ending\n");
		return ret;
	}

	ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);

	if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;

		for (i = start_chunk; i < start_chunk + nchunks; i++) {
			unsigned int dataoff = i * chip->ecc.size;
			unsigned int eccoff = 2 + (i * chip->ecc.bytes);

			bf = nand_check_erased_ecc_chunk(bufpoi + dataoff,
							 chip->ecc.size,
							 chip->oob_poi + eccoff,
							 chip->ecc.bytes,
							 NULL, 0,
							 chip->ecc.strength);
			if (bf < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += bf;
				max_bitflips = max_t(unsigned int, max_bitflips, bf);
			}
		}
	} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
		bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
		/*
		 * The number of bitflips is an approximation given the fact
		 * that this controller does not provide per-chunk details but
		 * only gives statistics on the entire page.
		 */
		mtd->ecc_stats.corrected += bf;
		max_bitflips = max_t(unsigned int, max_bitflips, bf);
	}

	return max_bitflips;
}

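/*
 * Full-page write with on-host ECC: the payload is staged in the DMA
 * bounce buffer, the controller computes and inserts the ECC bytes, and
 * the OOB free area is programmed separately when requested.
 */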
static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
				    int oob_required, int page)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct rnand_chip *rnand = to_rnand(chip);
	unsigned int cs = to_rnandc_cs(rnand);
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_SEQIN) |
			   COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
			   COMMAND_SEQ_WRITE_PAGE,
		.addr0_row = page,
		.len = mtd->writesize,
		.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
	};
	dma_addr_t dma_addr;
	int ret;

	memcpy(rnandc->buf, buf, mtd->writesize);

	/* Prepare controller */
	rnandc_select_target(chip, chip->cur_cs);
	rnandc_clear_status(rnandc);
	reinit_completion(&rnandc->complete);
	rnandc_en_interrupts(rnandc, INT_MEM_RDY(cs));
	rnandc_en_correction(rnandc);

	/* Configure DMA */
	dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
				  DMA_TO_DEVICE);
	writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
	writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
	writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);

	rnandc_trigger_op(rnandc, &rop);
	rnandc_trigger_dma(rnandc);

	ret = rnandc_wait_end_of_io(rnandc, chip);
	dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_TO_DEVICE);
	rnandc_dis_correction(rnandc);
	if (ret) {
		dev_err(rnandc->dev, "Write page operation never ending\n");
		return ret;
	}

	if (!oob_required)
		return 0;

	return nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
					   mtd->oobsize, false);
}

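/*
 * Subpage write: CPU-driven transfer through the FIFO, restricted to the
 * ECC chunks covering the requested area.
 */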
static int rnandc_write_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
				       u32 req_len, const u8 *bufpoi,
				       int oob_required, int page)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int page_off = round_down(req_offset, chip->ecc.size);
	unsigned int real_len = round_up(req_offset + req_len - page_off,
					 chip->ecc.size);
	unsigned int start_chunk = page_off / chip->ecc.size;
	unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_SEQIN) |
			   COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
			   COMMAND_SEQ_WRITE_PAGE,
		.addr0_row = page,
		.addr0_col = page_off,
		.len = real_len,
		.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
	};
	int ret;

	/* Prepare controller */
	rnandc_select_target(chip, chip->cur_cs);
	rnandc_clear_status(rnandc);
	rnandc_en_correction(rnandc);
	rnandc_trigger_op(rnandc, &rop);

	while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
		cpu_relax();

	iowrite32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
		      real_len / 4);

	while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
		cpu_relax();

	ret = rnandc_wait_end_of_op(rnandc, chip);
	rnandc_dis_correction(rnandc);
	if (ret) {
		dev_err(rnandc->dev, "Write subpage operation never ending\n");
		return ret;
	}

	return 0;
}

/*
 * This controller is simple enough and thus does not need to use the parser
 * provided by the core; instead, handle every situation here.
 */
static int rnandc_exec_op(struct nand_chip *chip,
			  const struct nand_operation *op, bool check_only)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	const struct nand_op_instr *instr = NULL;
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_AHBS,
		.gen_seq_ctrl = GEN_SEQ_IMD_SEQ,
	};
	unsigned int cmd_phase = 0, addr_phase = 0, data_phase = 0,
		delay_phase = 0, delays = 0;
	unsigned int op_id, col_addrs, row_addrs, naddrs, remainder, words, i;
	const u8 *addrs;
	u32 last_bytes;
	int ret;

	if (!check_only)
		rnandc_select_target(chip, op->cs);

	for (op_id = 0; op_id < op->ninstrs; op_id++) {
		instr = &op->instrs[op_id];
		nand_op_trace(" ", instr);
		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			switch (cmd_phase++) {
			case 0:
				rop.command |= COMMAND_0(instr->ctx.cmd.opcode);
				rop.gen_seq_ctrl |= GEN_SEQ_CMD0_EN;
				break;
			case 1:
				rop.gen_seq_ctrl |= GEN_SEQ_COMMAND_3(instr->ctx.cmd.opcode);
				rop.gen_seq_ctrl |= GEN_SEQ_CMD3_EN;
				if (addr_phase == 0)
					addr_phase = 1;
				break;
			case 2:
				rop.command |= COMMAND_2(instr->ctx.cmd.opcode);
				rop.gen_seq_ctrl |= GEN_SEQ_CMD2_EN;
				if (addr_phase <= 1)
					addr_phase = 2;
				break;
			case 3:
				rop.command |= COMMAND_1(instr->ctx.cmd.opcode);
				rop.gen_seq_ctrl |= GEN_SEQ_CMD1_EN;
				if (addr_phase <= 1)
					addr_phase = 2;
				if (delay_phase == 0)
					delay_phase = 1;
				if (data_phase == 0)
					data_phase = 1;
				break;
			default:
				return -EOPNOTSUPP;
			}
			break;

		case NAND_OP_ADDR_INSTR:
			addrs = instr->ctx.addr.addrs;
			naddrs = instr->ctx.addr.naddrs;
			if (naddrs > 5)
				return -EOPNOTSUPP;

			col_addrs = min(2U, naddrs);
			row_addrs = naddrs > 2 ? naddrs - col_addrs : 0;

			switch (addr_phase++) {
			case 0:
				for (i = 0; i < col_addrs; i++)
					rop.addr0_col |= addrs[i] << (i * 8);
				rop.gen_seq_ctrl |= GEN_SEQ_COL_A0(col_addrs);

				for (i = 0; i < row_addrs; i++)
					rop.addr0_row |= addrs[2 + i] << (i * 8);
				rop.gen_seq_ctrl |= GEN_SEQ_ROW_A0(row_addrs);

				if (cmd_phase == 0)
					cmd_phase = 1;
				break;
			case 1:
				for (i = 0; i < col_addrs; i++)
					rop.addr1_col |= addrs[i] << (i * 8);
				rop.gen_seq_ctrl |= GEN_SEQ_COL_A1(col_addrs);

				for (i = 0; i < row_addrs; i++)
					rop.addr1_row |= addrs[2 + i] << (i * 8);
				rop.gen_seq_ctrl |= GEN_SEQ_ROW_A1(row_addrs);

				if (cmd_phase <= 1)
					cmd_phase = 2;
				break;
			default:
				return -EOPNOTSUPP;
			}
			break;

		case NAND_OP_DATA_IN_INSTR:
			rop.read = true;
			fallthrough;
		case NAND_OP_DATA_OUT_INSTR:
			rop.gen_seq_ctrl |= GEN_SEQ_DATA_EN;
			rop.buf = instr->ctx.data.buf.in;
			rop.len = instr->ctx.data.len;
			rop.command |= COMMAND_FIFO_SEL;

			switch (data_phase++) {
			case 0:
				if (cmd_phase <= 2)
					cmd_phase = 3;
				if (addr_phase <= 1)
					addr_phase = 2;
				if (delay_phase == 0)
					delay_phase = 1;
				break;
			default:
				return -EOPNOTSUPP;
			}
			break;

		case NAND_OP_WAITRDY_INSTR:
			switch (delay_phase++) {
			case 0:
				rop.gen_seq_ctrl |= GEN_SEQ_DELAY0_EN;

				if (cmd_phase <= 2)
					cmd_phase = 3;
				break;
			case 1:
				rop.gen_seq_ctrl |= GEN_SEQ_DELAY1_EN;

				if (cmd_phase <= 3)
					cmd_phase = 4;
				if (data_phase == 0)
					data_phase = 1;
				break;
			default:
				return -EOPNOTSUPP;
			}
			break;
		}
	}

	/*
	 * Sequence 19 is generic and dedicated to write operations.
	 * Sequence 18 is also generic and works for all other operations.
	 */
	if (rop.buf && !rop.read)
		rop.command |= COMMAND_SEQ_GEN_OUT;
	else
		rop.command |= COMMAND_SEQ_GEN_IN;

	if (delays > 1) {
		dev_err(rnandc->dev, "Cannot handle more than one wait delay\n");
		return -EOPNOTSUPP;
	}

	if (check_only)
		return 0;

	rnandc_trigger_op(rnandc, &rop);

	words = rop.len / sizeof(u32);
	remainder = rop.len % sizeof(u32);
	if (rop.buf && rop.read) {
		while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
			cpu_relax();

		while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
			cpu_relax();

		ioread32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
		if (remainder) {
			last_bytes = readl_relaxed(rnandc->regs + FIFO_DATA_REG);
			memcpy(rop.buf + (words * sizeof(u32)), &last_bytes,
			       remainder);
		}

		if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
			dev_warn(rnandc->dev,
				 "Clearing residual data in the read FIFO\n");
			rnandc_clear_fifo(rnandc);
		}
	} else if (rop.len && !rop.read) {
		while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
			cpu_relax();

		iowrite32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
		if (remainder) {
			last_bytes = 0;
			memcpy(&last_bytes, rop.buf + (words * sizeof(u32)), remainder);
			writel_relaxed(last_bytes, rnandc->regs + FIFO_DATA_REG);
		}

		while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
			cpu_relax();
	}

	ret = rnandc_wait_end_of_op(rnandc, chip);
	if (ret)
		return ret;

	return 0;
}

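/*
 * NAND SDR timings are expressed in picoseconds; TO_CYCLES64() turns them
 * into nanoseconds and then into cycles of the external NAND bus clock
 * before they are packed into the timing registers.
 */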
static int rnandc_setup_interface(struct nand_chip *chip, int chipnr,
				  const struct nand_interface_config *conf)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);
	unsigned int period_ns = 1000000000 / rnandc->ext_clk_rate;
	const struct nand_sdr_timings *sdr;
	unsigned int cyc, cle, ale, bef_dly, ca_to_data;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	if (sdr->tRP_min != sdr->tWP_min || sdr->tREH_min != sdr->tWH_min) {
		dev_err(rnandc->dev, "Read and write hold times must be identical\n");
		return -EINVAL;
	}

	if (chipnr < 0)
		return 0;

	rnand->timings_asyn =
		TIMINGS_ASYN_TRWP(TO_CYCLES64(sdr->tRP_min, period_ns)) |
		TIMINGS_ASYN_TRWH(TO_CYCLES64(sdr->tREH_min, period_ns));
	rnand->tim_seq0 =
		TIM_SEQ0_TCCS(TO_CYCLES64(sdr->tCCS_min, period_ns)) |
		TIM_SEQ0_TADL(TO_CYCLES64(sdr->tADL_min, period_ns)) |
		TIM_SEQ0_TRHW(TO_CYCLES64(sdr->tRHW_min, period_ns)) |
		TIM_SEQ0_TWHR(TO_CYCLES64(sdr->tWHR_min, period_ns));
	rnand->tim_seq1 =
		TIM_SEQ1_TWB(TO_CYCLES64(sdr->tWB_max, period_ns)) |
		TIM_SEQ1_TRR(TO_CYCLES64(sdr->tRR_min, period_ns)) |
		TIM_SEQ1_TWW(TO_CYCLES64(sdr->tWW_min, period_ns));

	cyc = sdr->tDS_min + sdr->tDH_min;
	cle = sdr->tCLH_min + sdr->tCLS_min;
	ale = sdr->tALH_min + sdr->tALS_min;
	bef_dly = sdr->tWB_max - sdr->tDH_min;
	ca_to_data = sdr->tWHR_min + sdr->tREA_max - sdr->tDH_min;

	/*
	 * D0 = CMD -> ADDR = tCLH + tCLS - 1 cycle
	 * D1 = CMD -> CMD = tCLH + tCLS - 1 cycle
	 * D2 = CMD -> DLY = tWB - tDH
	 * D3 = CMD -> DATA = tWHR + tREA - tDH
	 */
	rnand->tim_gen_seq0 =
		TIM_GEN_SEQ0_D0(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ0_D1(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ0_D2(TO_CYCLES64(bef_dly, period_ns)) |
		TIM_GEN_SEQ0_D3(TO_CYCLES64(ca_to_data, period_ns));

	/*
	 * D4 = ADDR -> CMD = tALH + tALS - 1 cycle
	 * D5 = ADDR -> ADDR = tALH + tALS - 1 cycle
	 * D6 = ADDR -> DLY = tWB - tDH
	 * D7 = ADDR -> DATA = tWHR + tREA - tDH
	 */
	rnand->tim_gen_seq1 =
		TIM_GEN_SEQ1_D4(TO_CYCLES64(ale - cyc, period_ns)) |
		TIM_GEN_SEQ1_D5(TO_CYCLES64(ale - cyc, period_ns)) |
		TIM_GEN_SEQ1_D6(TO_CYCLES64(bef_dly, period_ns)) |
		TIM_GEN_SEQ1_D7(TO_CYCLES64(ca_to_data, period_ns));

	/*
	 * D8 = DLY -> DATA = tRR + tREA
	 * D9 = DLY -> CMD = tRR
	 * D10 = DATA -> CMD = tCLH + tCLS - 1 cycle
	 * D11 = DATA -> DLY = tWB - tDH
	 */
	rnand->tim_gen_seq2 =
		TIM_GEN_SEQ2_D8(TO_CYCLES64(sdr->tRR_min + sdr->tREA_max, period_ns)) |
		TIM_GEN_SEQ2_D9(TO_CYCLES64(sdr->tRR_min, period_ns)) |
		TIM_GEN_SEQ2_D10(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ2_D11(TO_CYCLES64(bef_dly, period_ns));

	/* D12 = DATA -> END = tCLH - tDH */
	rnand->tim_gen_seq3 =
		TIM_GEN_SEQ3_D12(TO_CYCLES64(sdr->tCLH_min - sdr->tDH_min, period_ns));

	return 0;
}

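/*
 * OOB layout: the first two bytes are kept for the bad block markers,
 * the ECC bytes (each step's syndrome rounded up to a multiple of 4)
 * come right after, and whatever remains is exposed as free space.
 */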
static int rnandc_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;

	if (section)
		return -ERANGE;

	oobregion->offset = 2;
	oobregion->length = eccbytes;

	return 0;
}

static int rnandc_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;

	if (section)
		return -ERANGE;

	oobregion->offset = 2 + eccbytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops rnandc_ooblayout_ops = {
	.ecc = rnandc_ooblayout_ecc,
	.free = rnandc_ooblayout_free,
};

static int rnandc_hw_ecc_controller_init(struct nand_chip *chip)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);

	if (mtd->writesize > SZ_16K) {
		dev_err(rnandc->dev, "Unsupported page size\n");
		return -EINVAL;
	}

	switch (chip->ecc.size) {
	case SZ_256:
		rnand->control |= CONTROL_ECC_BLOCK_SIZE_256;
		break;
	case SZ_512:
		rnand->control |= CONTROL_ECC_BLOCK_SIZE_512;
		break;
	case SZ_1K:
		rnand->control |= CONTROL_ECC_BLOCK_SIZE_1024;
		break;
	default:
		dev_err(rnandc->dev, "Unsupported ECC chunk size\n");
		return -EINVAL;
	}

	switch (chip->ecc.strength) {
	case 2:
		chip->ecc.bytes = 4;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_2B;
		break;
	case 4:
		chip->ecc.bytes = 7;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_4B;
		break;
	case 8:
		chip->ecc.bytes = 14;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_8B;
		break;
	case 16:
		chip->ecc.bytes = 28;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_16B;
		break;
	case 24:
		chip->ecc.bytes = 42;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_24B;
		break;
	case 32:
		chip->ecc.bytes = 56;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_32B;
		break;
	default:
		dev_err(rnandc->dev, "Unsupported ECC strength\n");
		return -EINVAL;
	}

	rnand->ecc_ctrl |= ECC_CTRL_ERR_THRESHOLD(chip->ecc.strength);

	mtd_set_ooblayout(mtd, &rnandc_ooblayout_ops);
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	chip->ecc.read_page = rnandc_read_page_hw_ecc;
	chip->ecc.read_subpage = rnandc_read_subpage_hw_ecc;
	chip->ecc.write_page = rnandc_write_page_hw_ecc;
	chip->ecc.write_subpage = rnandc_write_subpage_hw_ecc;

	return 0;
}

static int rnandc_ecc_init(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct rnandc *rnandc = to_rnandc(chip->controller);
	int ret;

	if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
	    (!ecc->size || !ecc->strength)) {
		if (requirements->step_size && requirements->strength) {
			ecc->size = requirements->step_size;
			ecc->strength = requirements->strength;
		} else {
			dev_err(rnandc->dev, "No minimum ECC strength\n");
			return -EINVAL;
		}
	}

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = rnandc_hw_ecc_controller_init(chip);
		if (ret)
			return ret;
		break;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rnandc_attach_chip(struct nand_chip *chip)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);
	int ret;

	/* Do not store BBT bits in the OOB section as it is not protected */
	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	if (mtd->writesize <= 512) {
		dev_err(rnandc->dev, "Small page devices not supported\n");
		return -EINVAL;
	}

	rnand->control |= CONTROL_CHECK_RB_LINE | CONTROL_INT_EN;

	switch (memorg->pages_per_eraseblock) {
	case 32:
		rnand->control |= CONTROL_BLOCK_SIZE_32P;
		break;
	case 64:
		rnand->control |= CONTROL_BLOCK_SIZE_64P;
		break;
	case 128:
		rnand->control |= CONTROL_BLOCK_SIZE_128P;
		break;
	case 256:
		rnand->control |= CONTROL_BLOCK_SIZE_256P;
		break;
	default:
		dev_err(rnandc->dev, "Unsupported memory organization\n");
		return -EINVAL;
	}

	chip->options |= NAND_SUBPAGE_READ;

	ret = rnandc_ecc_init(chip);
	if (ret) {
		dev_err(rnandc->dev, "ECC initialization failed (%d)\n", ret);
		return ret;
	}

	/* Force an update of the configuration registers */
	rnand->selected_die = -1;

	return 0;
}

static const struct nand_controller_ops rnandc_ops = {
	.attach_chip = rnandc_attach_chip,
	.exec_op = rnandc_exec_op,
	.setup_interface = rnandc_setup_interface,
};

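/*
 * A single DMA-capable bounce buffer is shared by all the chips:
 * (re)allocate it so that it can hold the largest page + OOB area of any
 * chip attached so far.
 */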
static int rnandc_alloc_dma_buf(struct rnandc *rnandc,
				struct mtd_info *new_mtd)
{
	unsigned int max_len = new_mtd->writesize + new_mtd->oobsize;
	struct rnand_chip *entry, *temp;
	struct nand_chip *chip;
	struct mtd_info *mtd;

	list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
		chip = &entry->chip;
		mtd = nand_to_mtd(chip);
		max_len = max(max_len, mtd->writesize + mtd->oobsize);
	}

	if (rnandc->buf && rnandc->buf_sz < max_len) {
		devm_kfree(rnandc->dev, rnandc->buf);
		rnandc->buf = NULL;
	}

	if (!rnandc->buf) {
		rnandc->buf_sz = max_len;
		rnandc->buf = devm_kmalloc(rnandc->dev, max_len,
					   GFP_KERNEL | GFP_DMA);
		if (!rnandc->buf)
			return -ENOMEM;
	}

	return 0;
}

static int rnandc_chip_init(struct rnandc *rnandc, struct device_node *np)
{
	struct rnand_chip *rnand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int nsels, ret, i;
	u32 cs;

	nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
	if (nsels <= 0) {
		ret = (nsels < 0) ? nsels : -EINVAL;
		dev_err(rnandc->dev, "Invalid reg property (%d)\n", ret);
		return ret;
	}

	/* Alloc the driver's NAND chip structure */
	rnand = devm_kzalloc(rnandc->dev, struct_size(rnand, sels, nsels),
			     GFP_KERNEL);
	if (!rnand)
		return -ENOMEM;

	rnand->nsels = nsels;
	rnand->selected_die = -1;

	for (i = 0; i < nsels; i++) {
		ret = of_property_read_u32_index(np, "reg", i, &cs);
		if (ret) {
			dev_err(rnandc->dev, "Incomplete reg property (%d)\n", ret);
			return ret;
		}

		if (cs >= RNANDC_CS_NUM) {
			dev_err(rnandc->dev, "Invalid reg property (%d)\n", cs);
			return -EINVAL;
		}

		if (test_and_set_bit(cs, &rnandc->assigned_cs)) {
			dev_err(rnandc->dev, "CS %d already assigned\n", cs);
			return -EINVAL;
		}

		/*
		 * No need to check for RB or WP properties, there is a 1:1
		 * mandatory mapping with the CS.
		 */
		rnand->sels[i].cs = cs;
	}

	chip = &rnand->chip;
	chip->controller = &rnandc->controller;
	nand_set_flash_node(chip, np);

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = rnandc->dev;
	if (!mtd->name) {
		dev_err(rnandc->dev, "Missing MTD label\n");
		return -EINVAL;
	}

	ret = nand_scan(chip, rnand->nsels);
	if (ret) {
		dev_err(rnandc->dev, "Failed to scan the NAND chip (%d)\n", ret);
		return ret;
	}

	ret = rnandc_alloc_dma_buf(rnandc, mtd);
	if (ret)
		goto cleanup_nand;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(rnandc->dev, "Failed to register MTD device (%d)\n", ret);
		goto cleanup_nand;
	}

	list_add_tail(&rnand->node, &rnandc->chips);

	return 0;

cleanup_nand:
	nand_cleanup(chip);

	return ret;
}

static void rnandc_chips_cleanup(struct rnandc *rnandc)
{
	struct rnand_chip *entry, *temp;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
		chip = &entry->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		list_del(&entry->node);
	}
}

static int rnandc_chips_init(struct rnandc *rnandc)
{
	struct device_node *np;
	int ret;

	for_each_child_of_node(rnandc->dev->of_node, np) {
		ret = rnandc_chip_init(rnandc, np);
		if (ret) {
			of_node_put(np);
			goto cleanup_chips;
		}
	}

	return 0;

cleanup_chips:
	rnandc_chips_cleanup(rnandc);

	return ret;
}

static int rnandc_probe(struct platform_device *pdev)
{
	struct rnandc *rnandc;
	struct clk *eclk;
	int irq, ret;

	rnandc = devm_kzalloc(&pdev->dev, sizeof(*rnandc), GFP_KERNEL);
	if (!rnandc)
		return -ENOMEM;

	rnandc->dev = &pdev->dev;
	nand_controller_init(&rnandc->controller);
	rnandc->controller.ops = &rnandc_ops;
	INIT_LIST_HEAD(&rnandc->chips);
	init_completion(&rnandc->complete);

	rnandc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rnandc->regs))
		return PTR_ERR(rnandc->regs);

	devm_pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	/* The external NAND bus clock rate is needed for computing timings */
	eclk = clk_get(&pdev->dev, "eclk");
	if (IS_ERR(eclk)) {
		ret = PTR_ERR(eclk);
		goto dis_runtime_pm;
	}

	rnandc->ext_clk_rate = clk_get_rate(eclk);
	clk_put(eclk);

	rnandc_dis_interrupts(rnandc);
	irq = platform_get_irq_optional(pdev, 0);
	if (irq == -EPROBE_DEFER) {
		ret = irq;
		goto dis_runtime_pm;
	} else if (irq < 0) {
		dev_info(&pdev->dev, "No IRQ found, fallback to polling\n");
		rnandc->use_polling = true;
	} else {
		ret = devm_request_irq(&pdev->dev, irq, rnandc_irq_handler, 0,
				       "renesas-nand-controller", rnandc);
		if (ret < 0)
			goto dis_runtime_pm;
	}

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto dis_runtime_pm;

	rnandc_clear_fifo(rnandc);

	platform_set_drvdata(pdev, rnandc);

	ret = rnandc_chips_init(rnandc);
	if (ret)
		goto dis_runtime_pm;

	return 0;

dis_runtime_pm:
	pm_runtime_put(&pdev->dev);

	return ret;
}

static int rnandc_remove(struct platform_device *pdev)
{
	struct rnandc *rnandc = platform_get_drvdata(pdev);

	rnandc_chips_cleanup(rnandc);

	pm_runtime_put(&pdev->dev);

	return 0;
}

static const struct of_device_id rnandc_id_table[] = {
	{ .compatible = "renesas,rcar-gen3-nandc" },
	{ .compatible = "renesas,rzn1-nandc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, rnandc_id_table);

static struct platform_driver rnandc_driver = {
	.driver = {
		.name = "renesas-nandc",
		.of_match_table = rnandc_id_table,
	},
	.probe = rnandc_probe,
	.remove = rnandc_remove,
};
module_platform_driver(rnandc_driver);

MODULE_AUTHOR("Miquel Raynal <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 NAND controller driver");
MODULE_LICENSE("GPL v2");