arasan-nand-controller.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Arasan NAND Flash Controller Driver
  4. *
  5. * Copyright (C) 2014 - 2020 Xilinx, Inc.
  6. * Author:
  7. * Miquel Raynal <[email protected]>
  8. * Original work (fully rewritten):
  9. * Punnaiah Choudary Kalluri <[email protected]>
  10. * Naga Sureshkumar Relli <[email protected]>
  11. */
  12. #include <linux/bch.h>
  13. #include <linux/bitfield.h>
  14. #include <linux/clk.h>
  15. #include <linux/delay.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/gpio/consumer.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/iopoll.h>
  20. #include <linux/module.h>
  21. #include <linux/mtd/mtd.h>
  22. #include <linux/mtd/partitions.h>
  23. #include <linux/mtd/rawnand.h>
  24. #include <linux/of.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/slab.h>
  27. #define PKT_REG 0x00
  28. #define PKT_SIZE(x) FIELD_PREP(GENMASK(10, 0), (x))
  29. #define PKT_STEPS(x) FIELD_PREP(GENMASK(23, 12), (x))
  30. #define MEM_ADDR1_REG 0x04
  31. #define MEM_ADDR2_REG 0x08
  32. #define ADDR2_STRENGTH(x) FIELD_PREP(GENMASK(27, 25), (x))
  33. #define ADDR2_CS(x) FIELD_PREP(GENMASK(31, 30), (x))
  34. #define CMD_REG 0x0C
  35. #define CMD_1(x) FIELD_PREP(GENMASK(7, 0), (x))
  36. #define CMD_2(x) FIELD_PREP(GENMASK(15, 8), (x))
  37. #define CMD_PAGE_SIZE(x) FIELD_PREP(GENMASK(25, 23), (x))
  38. #define CMD_DMA_ENABLE BIT(27)
  39. #define CMD_NADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
  40. #define CMD_ECC_ENABLE BIT(31)
  41. #define PROG_REG 0x10
  42. #define PROG_PGRD BIT(0)
  43. #define PROG_ERASE BIT(2)
  44. #define PROG_STATUS BIT(3)
  45. #define PROG_PGPROG BIT(4)
  46. #define PROG_RDID BIT(6)
  47. #define PROG_RDPARAM BIT(7)
  48. #define PROG_RST BIT(8)
  49. #define PROG_GET_FEATURE BIT(9)
  50. #define PROG_SET_FEATURE BIT(10)
  51. #define PROG_CHG_RD_COL_ENH BIT(14)
  52. #define INTR_STS_EN_REG 0x14
  53. #define INTR_SIG_EN_REG 0x18
  54. #define INTR_STS_REG 0x1C
  55. #define WRITE_READY BIT(0)
  56. #define READ_READY BIT(1)
  57. #define XFER_COMPLETE BIT(2)
  58. #define DMA_BOUNDARY BIT(6)
  59. #define EVENT_MASK GENMASK(7, 0)
  60. #define READY_STS_REG 0x20
  61. #define DMA_ADDR0_REG 0x50
  62. #define DMA_ADDR1_REG 0x24
  63. #define FLASH_STS_REG 0x28
  64. #define TIMING_REG 0x2C
  65. #define TCCS_TIME_500NS 0
  66. #define TCCS_TIME_300NS 3
  67. #define TCCS_TIME_200NS 2
  68. #define TCCS_TIME_100NS 1
  69. #define FAST_TCAD BIT(2)
  70. #define DQS_BUFF_SEL_IN(x) FIELD_PREP(GENMASK(6, 3), (x))
  71. #define DQS_BUFF_SEL_OUT(x) FIELD_PREP(GENMASK(18, 15), (x))
  72. #define DATA_PORT_REG 0x30
  73. #define ECC_CONF_REG 0x34
  74. #define ECC_CONF_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
  75. #define ECC_CONF_LEN(x) FIELD_PREP(GENMASK(26, 16), (x))
  76. #define ECC_CONF_BCH_EN BIT(27)
  77. #define ECC_ERR_CNT_REG 0x38
  78. #define GET_PKT_ERR_CNT(x) FIELD_GET(GENMASK(7, 0), (x))
  79. #define GET_PAGE_ERR_CNT(x) FIELD_GET(GENMASK(16, 8), (x))
  80. #define ECC_SP_REG 0x3C
  81. #define ECC_SP_CMD1(x) FIELD_PREP(GENMASK(7, 0), (x))
  82. #define ECC_SP_CMD2(x) FIELD_PREP(GENMASK(15, 8), (x))
  83. #define ECC_SP_ADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
  84. #define ECC_1ERR_CNT_REG 0x40
  85. #define ECC_2ERR_CNT_REG 0x44
  86. #define DATA_INTERFACE_REG 0x6C
  87. #define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x))
  88. #define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x))
  89. #define DIFACE_SDR 0
  90. #define DIFACE_NVDDR BIT(9)
  91. #define ANFC_MAX_CS 2
  92. #define ANFC_DFLT_TIMEOUT_US 1000000
  93. #define ANFC_MAX_CHUNK_SIZE SZ_1M
  94. #define ANFC_MAX_PARAM_SIZE SZ_4K
  95. #define ANFC_MAX_STEPS SZ_2K
  96. #define ANFC_MAX_PKT_SIZE (SZ_2K - 1)
  97. #define ANFC_MAX_ADDR_CYC 5U
  98. #define ANFC_RSVD_ECC_BYTES 21
  99. #define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000
  100. #define ANFC_XLNX_SDR_HS_CORE_CLK 80000000
/*
 * Default chip-select array used when no GPIO CS descriptors are provided:
 * both entries are NULL, i.e. both chip-selects are native (driven by the
 * controller itself rather than by a GPIO).
 * NOTE(review): the hardcoded size 2 presumably matches ANFC_MAX_CS — confirm.
 */
static struct gpio_desc *anfc_default_cs_array[2] = {NULL, NULL};
  102. /**
  103. * struct anfc_op - Defines how to execute an operation
  104. * @pkt_reg: Packet register
  105. * @addr1_reg: Memory address 1 register
  106. * @addr2_reg: Memory address 2 register
  107. * @cmd_reg: Command register
  108. * @prog_reg: Program register
  109. * @steps: Number of "packets" to read/write
  110. * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
  111. * @len: Data transfer length
  112. * @read: Data transfer direction from the controller point of view
  113. * @buf: Data buffer
  114. */
  115. struct anfc_op {
  116. u32 pkt_reg;
  117. u32 addr1_reg;
  118. u32 addr2_reg;
  119. u32 cmd_reg;
  120. u32 prog_reg;
  121. int steps;
  122. unsigned int rdy_timeout_ms;
  123. unsigned int len;
  124. bool read;
  125. u8 *buf;
  126. };
  127. /**
  128. * struct anand - Defines the NAND chip related information
  129. * @node: Used to store NAND chips into a list
  130. * @chip: NAND chip information structure
  131. * @rb: Ready-busy line
  132. * @page_sz: Register value of the page_sz field to use
  133. * @clk: Expected clock frequency to use
  134. * @data_iface: Data interface timing mode to use
  135. * @timings: NV-DDR specific timings to use
  136. * @ecc_conf: Hardware ECC configuration value
  137. * @strength: Register value of the ECC strength
  138. * @raddr_cycles: Row address cycle information
  139. * @caddr_cycles: Column address cycle information
  140. * @ecc_bits: Exact number of ECC bits per syndrome
  141. * @ecc_total: Total number of ECC bytes
  142. * @errloc: Array of errors located with soft BCH
  143. * @hw_ecc: Buffer to store syndromes computed by hardware
  144. * @bch: BCH structure
  145. * @cs_idx: Array of chip-select for this device, values are indexes
  146. * of the controller structure @gpio_cs array
  147. * @ncs_idx: Size of the @cs_idx array
  148. */
  149. struct anand {
  150. struct list_head node;
  151. struct nand_chip chip;
  152. unsigned int rb;
  153. unsigned int page_sz;
  154. unsigned long clk;
  155. u32 data_iface;
  156. u32 timings;
  157. u32 ecc_conf;
  158. u32 strength;
  159. u16 raddr_cycles;
  160. u16 caddr_cycles;
  161. unsigned int ecc_bits;
  162. unsigned int ecc_total;
  163. unsigned int *errloc;
  164. u8 *hw_ecc;
  165. struct bch_control *bch;
  166. int *cs_idx;
  167. int ncs_idx;
  168. };
  169. /**
  170. * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
  171. * @dev: Pointer to the device structure
  172. * @base: Remapped register area
  173. * @controller_clk: Pointer to the system clock
  174. * @bus_clk: Pointer to the flash clock
  175. * @controller: Base controller structure
  176. * @chips: List of all NAND chips attached to the controller
  177. * @cur_clk: Current clock rate
  178. * @cs_array: CS array. Native CS are left empty, the other cells are
  179. * populated with their corresponding GPIO descriptor.
  180. * @ncs: Size of @cs_array
  181. * @cur_cs: Index in @cs_array of the currently in use CS
  182. * @native_cs: Currently selected native CS
  183. * @spare_cs: Native CS that is not wired (may be selected when a GPIO
  184. * CS is in use)
  185. */
  186. struct arasan_nfc {
  187. struct device *dev;
  188. void __iomem *base;
  189. struct clk *controller_clk;
  190. struct clk *bus_clk;
  191. struct nand_controller controller;
  192. struct list_head chips;
  193. unsigned int cur_clk;
  194. struct gpio_desc **cs_array;
  195. unsigned int ncs;
  196. int cur_cs;
  197. unsigned int native_cs;
  198. unsigned int spare_cs;
  199. };
  200. static struct anand *to_anand(struct nand_chip *nand)
  201. {
  202. return container_of(nand, struct anand, chip);
  203. }
  204. static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
  205. {
  206. return container_of(ctrl, struct arasan_nfc, controller);
  207. }
  208. static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
  209. {
  210. u32 val;
  211. int ret;
  212. ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
  213. val & event, 0,
  214. ANFC_DFLT_TIMEOUT_US);
  215. if (ret) {
  216. dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
  217. return -ETIMEDOUT;
  218. }
  219. writel_relaxed(event, nfc->base + INTR_STS_REG);
  220. return 0;
  221. }
  222. static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
  223. unsigned int timeout_ms)
  224. {
  225. struct anand *anand = to_anand(chip);
  226. u32 val;
  227. int ret;
  228. /* There is no R/B interrupt, we must poll a register */
  229. ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
  230. val & BIT(anand->rb),
  231. 1, timeout_ms * 1000);
  232. if (ret) {
  233. dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
  234. readl_relaxed(nfc->base + READY_STS_REG));
  235. return -ETIMEDOUT;
  236. }
  237. return 0;
  238. }
  239. static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
  240. {
  241. writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
  242. writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
  243. writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
  244. writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
  245. writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
  246. }
  247. static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
  248. unsigned int *pktsize)
  249. {
  250. unsigned int nb, sz;
  251. for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
  252. sz = len / nb;
  253. if (sz <= ANFC_MAX_PKT_SIZE)
  254. break;
  255. }
  256. if (sz * nb != len)
  257. return -ENOTSUPP;
  258. if (steps)
  259. *steps = nb;
  260. if (pktsize)
  261. *pktsize = sz;
  262. return 0;
  263. }
  264. static bool anfc_is_gpio_cs(struct arasan_nfc *nfc, int nfc_cs)
  265. {
  266. return nfc_cs >= 0 && nfc->cs_array[nfc_cs];
  267. }
  268. static int anfc_relative_to_absolute_cs(struct anand *anand, int num)
  269. {
  270. return anand->cs_idx[num];
  271. }
/*
 * Assert the chip-select at index @nfc_cs_idx of the @nfc->cs_array,
 * deasserting the previously active GPIO CS if any. When a GPIO CS is
 * selected, the controller itself is pointed at the spare (unwired)
 * native CS so it does not drive a real chip at the same time.
 */
static void anfc_assert_cs(struct arasan_nfc *nfc, unsigned int nfc_cs_idx)
{
	/* CS did not change: do nothing */
	if (nfc->cur_cs == nfc_cs_idx)
		return;

	/* Deassert the previous CS if it was a GPIO */
	if (anfc_is_gpio_cs(nfc, nfc->cur_cs))
		gpiod_set_value_cansleep(nfc->cs_array[nfc->cur_cs], 1);

	/* Assert the new one */
	if (anfc_is_gpio_cs(nfc, nfc_cs_idx)) {
		/* Park the controller on the unwired native CS */
		nfc->native_cs = nfc->spare_cs;
		gpiod_set_value_cansleep(nfc->cs_array[nfc_cs_idx], 0);
	} else {
		nfc->native_cs = nfc_cs_idx;
	}

	nfc->cur_cs = nfc_cs_idx;
}
  289. static int anfc_select_target(struct nand_chip *chip, int target)
  290. {
  291. struct anand *anand = to_anand(chip);
  292. struct arasan_nfc *nfc = to_anfc(chip->controller);
  293. unsigned int nfc_cs_idx = anfc_relative_to_absolute_cs(anand, target);
  294. int ret;
  295. anfc_assert_cs(nfc, nfc_cs_idx);
  296. /* Update the controller timings and the potential ECC configuration */
  297. writel_relaxed(anand->data_iface, nfc->base + DATA_INTERFACE_REG);
  298. writel_relaxed(anand->timings, nfc->base + TIMING_REG);
  299. /* Update clock frequency */
  300. if (nfc->cur_clk != anand->clk) {
  301. clk_disable_unprepare(nfc->bus_clk);
  302. ret = clk_set_rate(nfc->bus_clk, anand->clk);
  303. if (ret) {
  304. dev_err(nfc->dev, "Failed to change clock rate\n");
  305. return ret;
  306. }
  307. ret = clk_prepare_enable(nfc->bus_clk);
  308. if (ret) {
  309. dev_err(nfc->dev,
  310. "Failed to re-enable the bus clock\n");
  311. return ret;
  312. }
  313. nfc->cur_clk = anand->clk;
  314. }
  315. return 0;
  316. }
  317. /*
  318. * When using the embedded hardware ECC engine, the controller is in charge of
  319. * feeding the engine with, first, the ECC residue present in the data array.
  320. * A typical read operation is:
  321. * 1/ Assert the read operation by sending the relevant command/address cycles
  322. * but targeting the column of the first ECC bytes in the OOB area instead of
  323. * the main data directly.
  324. * 2/ After having read the relevant number of ECC bytes, the controller uses
  325. * the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
  326. * Register" to move the pointer back at the beginning of the main data.
  327. * 3/ It will read the content of the main area for a given size (pktsize) and
  328. * will feed the ECC engine with this buffer again.
  329. * 4/ The ECC engine derives the ECC bytes for the given data and compare them
  330. * with the ones already received. It eventually trigger status flags and
  331. * then set the "Buffer Read Ready" flag.
  332. * 5/ The corrected data is then available for reading from the data port
  333. * register.
  334. *
  335. * The hardware BCH ECC engine is known to be inconstent in BCH mode and never
  336. * reports uncorrectable errors. Because of this bug, we have to use the
  337. * software BCH implementation in the read path.
  338. */
  339. static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
  340. int oob_required, int page)
  341. {
  342. struct arasan_nfc *nfc = to_anfc(chip->controller);
  343. struct mtd_info *mtd = nand_to_mtd(chip);
  344. struct anand *anand = to_anand(chip);
  345. unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
  346. unsigned int max_bitflips = 0;
  347. dma_addr_t dma_addr;
  348. int step, ret;
  349. struct anfc_op nfc_op = {
  350. .pkt_reg =
  351. PKT_SIZE(chip->ecc.size) |
  352. PKT_STEPS(chip->ecc.steps),
  353. .addr1_reg =
  354. (page & 0xFF) << (8 * (anand->caddr_cycles)) |
  355. (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
  356. .addr2_reg =
  357. ((page >> 16) & 0xFF) |
  358. ADDR2_STRENGTH(anand->strength) |
  359. ADDR2_CS(nfc->native_cs),
  360. .cmd_reg =
  361. CMD_1(NAND_CMD_READ0) |
  362. CMD_2(NAND_CMD_READSTART) |
  363. CMD_PAGE_SIZE(anand->page_sz) |
  364. CMD_DMA_ENABLE |
  365. CMD_NADDRS(anand->caddr_cycles +
  366. anand->raddr_cycles),
  367. .prog_reg = PROG_PGRD,
  368. };
  369. dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
  370. if (dma_mapping_error(nfc->dev, dma_addr)) {
  371. dev_err(nfc->dev, "Buffer mapping error");
  372. return -EIO;
  373. }
  374. writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
  375. writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
  376. anfc_trigger_op(nfc, &nfc_op);
  377. ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
  378. dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
  379. if (ret) {
  380. dev_err(nfc->dev, "Error reading page %d\n", page);
  381. return ret;
  382. }
  383. /* Store the raw OOB bytes as well */
  384. ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
  385. mtd->oobsize, 0);
  386. if (ret)
  387. return ret;
  388. /*
  389. * For each step, compute by softare the BCH syndrome over the raw data.
  390. * Compare the theoretical amount of errors and compare with the
  391. * hardware engine feedback.
  392. */
  393. for (step = 0; step < chip->ecc.steps; step++) {
  394. u8 *raw_buf = &buf[step * chip->ecc.size];
  395. unsigned int bit, byte;
  396. int bf, i;
  397. /* Extract the syndrome, it is not necessarily aligned */
  398. memset(anand->hw_ecc, 0, chip->ecc.bytes);
  399. nand_extract_bits(anand->hw_ecc, 0,
  400. &chip->oob_poi[mtd->oobsize - anand->ecc_total],
  401. anand->ecc_bits * step, anand->ecc_bits);
  402. bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
  403. anand->hw_ecc, NULL, NULL, anand->errloc);
  404. if (!bf) {
  405. continue;
  406. } else if (bf > 0) {
  407. for (i = 0; i < bf; i++) {
  408. /* Only correct the data, not the syndrome */
  409. if (anand->errloc[i] < (chip->ecc.size * 8)) {
  410. bit = BIT(anand->errloc[i] & 7);
  411. byte = anand->errloc[i] >> 3;
  412. raw_buf[byte] ^= bit;
  413. }
  414. }
  415. mtd->ecc_stats.corrected += bf;
  416. max_bitflips = max_t(unsigned int, max_bitflips, bf);
  417. continue;
  418. }
  419. bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
  420. NULL, 0, NULL, 0,
  421. chip->ecc.strength);
  422. if (bf > 0) {
  423. mtd->ecc_stats.corrected += bf;
  424. max_bitflips = max_t(unsigned int, max_bitflips, bf);
  425. memset(raw_buf, 0xFF, chip->ecc.size);
  426. } else if (bf < 0) {
  427. mtd->ecc_stats.failed++;
  428. }
  429. }
  430. return 0;
  431. }
  432. static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
  433. int oob_required, int page)
  434. {
  435. int ret;
  436. ret = anfc_select_target(chip, chip->cur_cs);
  437. if (ret)
  438. return ret;
  439. return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
  440. };
/*
 * Program one page using the hardware ECC engine (write path only; the
 * read path uses software BCH, see the comment above the read helper).
 * The main data is transferred by DMA with ECC generation enabled, the
 * spare area (not ECC protected) is written separately, and the chip
 * status is checked afterwards.
 */
static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
				  int oob_required, int page)
{
	struct anand *anand = to_anand(chip);
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
	dma_addr_t dma_addr;
	u8 status;
	int ret;
	struct anfc_op nfc_op = {
		.pkt_reg =
			PKT_SIZE(chip->ecc.size) |
			PKT_STEPS(chip->ecc.steps),
		.addr1_reg =
			(page & 0xFF) << (8 * (anand->caddr_cycles)) |
			(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
		.addr2_reg =
			((page >> 16) & 0xFF) |
			ADDR2_STRENGTH(anand->strength) |
			ADDR2_CS(nfc->native_cs),
		.cmd_reg =
			CMD_1(NAND_CMD_SEQIN) |
			CMD_2(NAND_CMD_PAGEPROG) |
			CMD_PAGE_SIZE(anand->page_sz) |
			CMD_DMA_ENABLE |
			CMD_NADDRS(anand->caddr_cycles +
				   anand->raddr_cycles) |
			CMD_ECC_ENABLE,
		.prog_reg = PROG_PGPROG,
	};

	/* ECC engine configuration must be programmed before triggering */
	writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
	/* RNDIN sequence used by the engine to reach the ECC bytes location */
	writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
		       ECC_SP_ADDRS(anand->caddr_cycles),
		       nfc->base + ECC_SP_REG);

	dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(nfc->dev, dma_addr)) {
		dev_err(nfc->dev, "Buffer mapping error");
		return -EIO;
	}

	writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
	writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);

	anfc_trigger_op(nfc, &nfc_op);
	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
	dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
	if (ret) {
		dev_err(nfc->dev, "Error writing page %d\n", page);
		return ret;
	}

	/* Spare data is not protected */
	if (oob_required) {
		ret = nand_write_oob_std(chip, page);
		if (ret)
			return ret;
	}

	/* Check write status on the chip side */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
  504. static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
  505. int oob_required, int page)
  506. {
  507. int ret;
  508. ret = anfc_select_target(chip, chip->cur_cs);
  509. if (ret)
  510. return ret;
  511. return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
  512. };
  513. /* NAND framework ->exec_op() hooks and related helpers */
/*
 * Translate a parsed subop into the register values of an &struct anfc_op:
 * first CMD instruction goes into CMD_1, second into CMD_2; up to
 * ANFC_MAX_ADDR_CYC address cycles are packed into the two address
 * registers (4 in addr1, the 5th in addr2); data in/out instructions set
 * the buffer, length and packet geometry; WAITRDY records the timeout.
 */
static int anfc_parse_instructions(struct nand_chip *chip,
				   const struct nand_subop *subop,
				   struct anfc_op *nfc_op)
{
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct anand *anand = to_anand(chip);
	const struct nand_op_instr *instr = NULL;
	bool first_cmd = true;
	unsigned int op_id;
	int ret, i;

	memset(nfc_op, 0, sizeof(*nfc_op));
	nfc_op->addr2_reg = ADDR2_CS(nfc->native_cs);
	nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int offset, naddrs, pktsize;
		const u8 *addrs;
		u8 *buf;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd)
				nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
			else
				nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);

			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			nfc_op->cmd_reg |= CMD_NADDRS(naddrs);

			/* First 4 cycles in addr1, 5th cycle in addr2 */
			for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
				if (i < 4)
					nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
				else
					nfc_op->addr2_reg |= addrs[i];
			}

			break;
		case NAND_OP_DATA_IN_INSTR:
			nfc_op->read = true;

			fallthrough;
		case NAND_OP_DATA_OUT_INSTR:
			offset = nand_subop_get_data_start_off(subop, op_id);
			buf = instr->ctx.data.buf.in;
			nfc_op->buf = &buf[offset];
			nfc_op->len = nand_subop_get_data_len(subop, op_id);
			ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
						  &pktsize);
			if (ret)
				return ret;

			/*
			 * Number of DATA cycles must be aligned on 4, this
			 * means the controller might read/write more than
			 * requested. This is harmless most of the time as extra
			 * DATA are discarded in the write path and read pointer
			 * adjusted in the read path.
			 *
			 * FIXME: The core should mark operations where
			 * reading/writing more is allowed so the exec_op()
			 * implementation can take the right decision when the
			 * alignment constraint is not met: adjust the number of
			 * DATA cycles when it's allowed, reject the operation
			 * otherwise.
			 */
			nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
					   PKT_STEPS(nfc_op->steps);
			break;

		case NAND_OP_WAITRDY_INSTR:
			nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			break;
		}
	}

	return 0;
}
/*
 * Perform the data phase of an operation in PIO mode: for each packet
 * (step), wait for the controller's read/write ready event and move the
 * packet's 32-bit words through the data port; any trailing bytes (len
 * not a multiple of 4) are handled with a single extra word access.
 */
static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
{
	/* Whole 32-bit words transferred per packet */
	unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
	unsigned int last_len = nfc_op->len % 4;
	unsigned int offset, dir;
	u8 *buf = nfc_op->buf;
	int ret, i;

	for (i = 0; i < nfc_op->steps; i++) {
		dir = nfc_op->read ? READ_READY : WRITE_READY;
		ret = anfc_wait_for_event(nfc, dir);
		if (ret) {
			dev_err(nfc->dev, "PIO %s ready signal not received\n",
				nfc_op->read ? "Read" : "Write");
			return ret;
		}

		offset = i * (dwords * 4);
		if (nfc_op->read)
			ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
				     dwords);
		else
			iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
				      dwords);
	}

	if (last_len) {
		u32 remainder;

		offset = nfc_op->len - last_len;

		if (nfc_op->read) {
			remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
			memcpy(&buf[offset], &remainder, last_len);
		} else {
			/*
			 * NOTE(review): only last_len bytes of 'remainder' are
			 * initialized before the full 32-bit word is written to
			 * the data port; the padding bytes are indeterminate —
			 * consider initializing to ~0 (NAND erased state).
			 */
			memcpy(&remainder, &buf[offset], last_len);
			writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
		}
	}

	return anfc_wait_for_event(nfc, XFER_COMPLETE);
}
  624. static int anfc_misc_data_type_exec(struct nand_chip *chip,
  625. const struct nand_subop *subop,
  626. u32 prog_reg)
  627. {
  628. struct arasan_nfc *nfc = to_anfc(chip->controller);
  629. struct anfc_op nfc_op = {};
  630. int ret;
  631. ret = anfc_parse_instructions(chip, subop, &nfc_op);
  632. if (ret)
  633. return ret;
  634. nfc_op.prog_reg = prog_reg;
  635. anfc_trigger_op(nfc, &nfc_op);
  636. if (nfc_op.rdy_timeout_ms) {
  637. ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
  638. if (ret)
  639. return ret;
  640. }
  641. return anfc_rw_pio_op(nfc, &nfc_op);
  642. }
  643. static int anfc_param_read_type_exec(struct nand_chip *chip,
  644. const struct nand_subop *subop)
  645. {
  646. return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
  647. }
/* Execute a page-read sequence, handling the CHANGE READ COLUMN quirk. */
static int anfc_data_read_type_exec(struct nand_chip *chip,
				    const struct nand_subop *subop)
{
	u32 prog_reg = PROG_PGRD;

	/*
	 * Experience shows that while in SDR mode sending a CHANGE READ COLUMN
	 * command through the READ PAGE "type" always works fine, when in
	 * NV-DDR mode the same command simply fails. However, it was also
	 * spotted that any CHANGE READ COLUMN command sent through the CHANGE
	 * READ COLUMN ENHANCED "type" would correctly work in both cases (SDR
	 * and NV-DDR). So, for simplicity, let's program the controller with
	 * the CHANGE READ COLUMN ENHANCED "type" whenever we are requested to
	 * perform a CHANGE READ COLUMN operation.
	 */
	/* Instrs 0 and 2 are the two CMD cycles of the matched pattern */
	if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_RNDOUT &&
	    subop->instrs[2].ctx.cmd.opcode == NAND_CMD_RNDOUTSTART)
		prog_reg = PROG_CHG_RD_COL_ENH;

	return anfc_misc_data_type_exec(chip, subop, prog_reg);
}
  667. static int anfc_param_write_type_exec(struct nand_chip *chip,
  668. const struct nand_subop *subop)
  669. {
  670. return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
  671. }
  672. static int anfc_data_write_type_exec(struct nand_chip *chip,
  673. const struct nand_subop *subop)
  674. {
  675. return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
  676. }
  677. static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
  678. const struct nand_subop *subop,
  679. u32 prog_reg)
  680. {
  681. struct arasan_nfc *nfc = to_anfc(chip->controller);
  682. struct anfc_op nfc_op = {};
  683. int ret;
  684. ret = anfc_parse_instructions(chip, subop, &nfc_op);
  685. if (ret)
  686. return ret;
  687. nfc_op.prog_reg = prog_reg;
  688. anfc_trigger_op(nfc, &nfc_op);
  689. ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
  690. if (ret)
  691. return ret;
  692. if (nfc_op.rdy_timeout_ms)
  693. ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
  694. return ret;
  695. }
/*
 * Execute a STATUS read: run the zero-length STATUS program, then fetch
 * the single status byte from the controller's flash status register
 * (instr 1 of the matched pattern is the 1-byte DATA_IN instruction).
 */
static int anfc_status_type_exec(struct nand_chip *chip,
				 const struct nand_subop *subop)
{
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	u32 tmp;
	int ret;

	/* See anfc_check_op() for details about this constraint */
	if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
		return -ENOTSUPP;

	ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
	if (ret)
		return ret;

	/* Only the low byte of FLASH_STS carries the chip status */
	tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
	memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);

	return 0;
}
/* Execute a RESET operation (no data cycles). */
static int anfc_reset_type_exec(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
}
/* Execute a block ERASE operation (no data cycles). */
static int anfc_erase_type_exec(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
}
/* Execute a standalone WAITRDY instruction: wait on the R/B line only. */
static int anfc_wait_type_exec(struct nand_chip *chip,
			       const struct nand_subop *subop)
{
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct anfc_op nfc_op = {};
	int ret;

	/* Parsing here only extracts the Ready/Busy timeout */
	ret = anfc_parse_instructions(chip, subop, &nfc_op);
	if (ret)
		return ret;

	return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
}
/*
 * Operation patterns natively supported by the controller. The parser
 * matches each incoming NAND operation against these patterns in order
 * and dispatches it to the associated exec helper.
 */
static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
	/* Parameter page / feature reads: CMD [ADDR] [WAITRDY] DATA_IN */
	NAND_OP_PARSER_PATTERN(
		anfc_param_read_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
	/* Feature writes: CMD ADDR DATA_OUT */
	NAND_OP_PARSER_PATTERN(
		anfc_param_write_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
	/* Page reads: CMD ADDR CMD [WAITRDY] [DATA_IN] */
	NAND_OP_PARSER_PATTERN(
		anfc_data_read_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
	/* Page writes: CMD ADDR DATA_OUT CMD */
	NAND_OP_PARSER_PATTERN(
		anfc_data_write_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
	/* Reset: CMD WAITRDY */
	NAND_OP_PARSER_PATTERN(
		anfc_reset_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	/* Erase: CMD ADDR CMD WAITRDY */
	NAND_OP_PARSER_PATTERN(
		anfc_erase_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	/* Read status: CMD DATA_IN */
	NAND_OP_PARSER_PATTERN(
		anfc_status_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
	/* Bare wait on Ready/Busy */
	NAND_OP_PARSER_PATTERN(
		anfc_wait_type_exec,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	);
  776. static int anfc_check_op(struct nand_chip *chip,
  777. const struct nand_operation *op)
  778. {
  779. const struct nand_op_instr *instr;
  780. int op_id;
  781. /*
  782. * The controller abstracts all the NAND operations and do not support
  783. * data only operations.
  784. *
  785. * TODO: The nand_op_parser framework should be extended to
  786. * support custom checks on DATA instructions.
  787. */
  788. for (op_id = 0; op_id < op->ninstrs; op_id++) {
  789. instr = &op->instrs[op_id];
  790. switch (instr->type) {
  791. case NAND_OP_ADDR_INSTR:
  792. if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
  793. return -ENOTSUPP;
  794. break;
  795. case NAND_OP_DATA_IN_INSTR:
  796. case NAND_OP_DATA_OUT_INSTR:
  797. if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
  798. return -ENOTSUPP;
  799. if (anfc_pkt_len_config(instr->ctx.data.len, NULL, NULL))
  800. return -ENOTSUPP;
  801. break;
  802. default:
  803. break;
  804. }
  805. }
  806. /*
  807. * The controller does not allow to proceed with a CMD+DATA_IN cycle
  808. * manually on the bus by reading data from the data register. Instead,
  809. * the controller abstract a status read operation with its own status
  810. * register after ordering a read status operation. Hence, we cannot
  811. * support any CMD+DATA_IN operation other than a READ STATUS.
  812. *
  813. * TODO: The nand_op_parser() framework should be extended to describe
  814. * fixed patterns instead of open-coding this check here.
  815. */
  816. if (op->ninstrs == 2 &&
  817. op->instrs[0].type == NAND_OP_CMD_INSTR &&
  818. op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
  819. op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
  820. return -ENOTSUPP;
  821. return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
  822. }
/*
 * Controller ->exec_op() hook. Check-only requests go through
 * anfc_check_op(); real executions first select the target die, then run
 * the operation through the pattern parser.
 */
static int anfc_exec_op(struct nand_chip *chip,
			const struct nand_operation *op,
			bool check_only)
{
	int ret;

	if (check_only)
		return anfc_check_op(chip, op);

	ret = anfc_select_target(chip, op->cs);
	if (ret)
		return ret;

	return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
}
/*
 * Controller ->setup_interface() hook: validate the proposed SDR/NV-DDR
 * timings and, when @target designates a real die (>= 0), derive the
 * data interface, timing and clock settings to apply on selection.
 */
static int anfc_setup_interface(struct nand_chip *chip, int target,
				const struct nand_interface_config *conf)
{
	struct anand *anand = to_anand(chip);
	struct arasan_nfc *nfc = to_anfc(chip->controller);
	struct device_node *np = nfc->dev->of_node;
	const struct nand_sdr_timings *sdr;
	const struct nand_nvddr_timings *nvddr;
	unsigned int tccs_min, dqs_mode, fast_tcad;

	if (nand_interface_is_nvddr(conf)) {
		nvddr = nand_get_nvddr_timings(conf);
		if (IS_ERR(nvddr))
			return PTR_ERR(nvddr);

		/*
		 * The controller only supports data payload requests which are
		 * a multiple of 4. In practice, most data accesses are 4-byte
		 * aligned and this is not an issue. However, rounding up will
		 * simply be refused by the controller if we reached the end of
		 * the device *and* we are using the NV-DDR interface(!). In
		 * this situation, unaligned data requests ending at the device
		 * boundary will confuse the controller and cannot be performed.
		 *
		 * This is something that happens in nand_read_subpage() when
		 * selecting software ECC support and must be avoided.
		 */
		if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT)
			return -ENOTSUPP;
	} else {
		sdr = nand_get_sdr_timings(conf);
		if (IS_ERR(sdr))
			return PTR_ERR(sdr);
	}

	/* Negative targets are timing-check requests: nothing to apply */
	if (target < 0)
		return 0;

	if (nand_interface_is_sdr(conf)) {
		anand->data_iface = DIFACE_SDR |
				    DIFACE_SDR_MODE(conf->timings.mode);
		anand->timings = 0;
	} else {
		anand->data_iface = DIFACE_NVDDR |
				    DIFACE_DDR_MODE(conf->timings.mode);

		/* ONFI timings below are expressed in picoseconds */
		if (conf->timings.nvddr.tCCS_min <= 100000)
			tccs_min = TCCS_TIME_100NS;
		else if (conf->timings.nvddr.tCCS_min <= 200000)
			tccs_min = TCCS_TIME_200NS;
		else if (conf->timings.nvddr.tCCS_min <= 300000)
			tccs_min = TCCS_TIME_300NS;
		else
			tccs_min = TCCS_TIME_500NS;

		fast_tcad = 0;
		if (conf->timings.nvddr.tCAD_min < 45000)
			fast_tcad = FAST_TCAD;

		/* DQS buffer selection depends on the NV-DDR mode */
		switch (conf->timings.mode) {
		case 5:
		case 4:
			dqs_mode = 2;
			break;
		case 3:
			dqs_mode = 3;
			break;
		case 2:
			dqs_mode = 4;
			break;
		case 1:
			dqs_mode = 5;
			break;
		case 0:
		default:
			dqs_mode = 6;
			break;
		}

		anand->timings = tccs_min | fast_tcad |
				 DQS_BUFF_SEL_IN(dqs_mode) |
				 DQS_BUFF_SEL_OUT(dqs_mode);
	}

	if (nand_interface_is_sdr(conf)) {
		anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
	} else {
		/* ONFI timings are defined in picoseconds */
		anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
				     conf->timings.nvddr.tCK_min);
	}

	/*
	 * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
	 * with f > 90MHz (default clock is 100MHz) but signals are unstable
	 * with higher modes. Hence we decrease a little bit the clock rate to
	 * 80MHz when using SDR modes 2-5 with this SoC.
	 */
	if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
	    nand_interface_is_sdr(conf) && conf->timings.mode >= 2)
		anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;

	return 0;
}
  928. static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
  929. {
  930. unsigned int bch_gf_mag, ecc_bits;
  931. switch (step_size) {
  932. case SZ_512:
  933. bch_gf_mag = 13;
  934. break;
  935. case SZ_1K:
  936. bch_gf_mag = 14;
  937. break;
  938. default:
  939. return -EINVAL;
  940. }
  941. ecc_bits = bch_gf_mag * strength;
  942. return DIV_ROUND_UP(ecc_bits, 8);
  943. }
/* Correction strengths supported by the hardware BCH engine, per step size */
static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};

static const int anfc_hw_ecc_1024_strengths[] = {24};

static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
	{
		.stepsize = SZ_512,
		.strengths = anfc_hw_ecc_512_strengths,
		.nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
	},
	{
		.stepsize = SZ_1K,
		.strengths = anfc_hw_ecc_1024_strengths,
		.nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
	},
};

/* Capabilities handed to nand_ecc_choose_conf() */
static const struct nand_ecc_caps anfc_hw_ecc_caps = {
	.stepinfos = anfc_hw_ecc_step_infos,
	.nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
	.calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
};
  963. static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
  964. struct nand_chip *chip)
  965. {
  966. struct anand *anand = to_anand(chip);
  967. struct mtd_info *mtd = nand_to_mtd(chip);
  968. struct nand_ecc_ctrl *ecc = &chip->ecc;
  969. unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
  970. int ret;
  971. switch (mtd->writesize) {
  972. case SZ_512:
  973. case SZ_2K:
  974. case SZ_4K:
  975. case SZ_8K:
  976. case SZ_16K:
  977. break;
  978. default:
  979. dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
  980. return -EINVAL;
  981. }
  982. ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
  983. if (ret)
  984. return ret;
  985. switch (ecc->strength) {
  986. case 12:
  987. anand->strength = 0x1;
  988. break;
  989. case 8:
  990. anand->strength = 0x2;
  991. break;
  992. case 4:
  993. anand->strength = 0x3;
  994. break;
  995. case 24:
  996. anand->strength = 0x4;
  997. break;
  998. default:
  999. dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
  1000. return -EINVAL;
  1001. }
  1002. switch (ecc->size) {
  1003. case SZ_512:
  1004. bch_gf_mag = 13;
  1005. bch_prim_poly = 0x201b;
  1006. break;
  1007. case SZ_1K:
  1008. bch_gf_mag = 14;
  1009. bch_prim_poly = 0x4443;
  1010. break;
  1011. default:
  1012. dev_err(nfc->dev, "Unsupported step size %d\n", ecc->strength);
  1013. return -EINVAL;
  1014. }
  1015. mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
  1016. ecc->steps = mtd->writesize / ecc->size;
  1017. ecc->algo = NAND_ECC_ALGO_BCH;
  1018. anand->ecc_bits = bch_gf_mag * ecc->strength;
  1019. ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
  1020. anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
  1021. ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
  1022. anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
  1023. ECC_CONF_LEN(anand->ecc_total) |
  1024. ECC_CONF_BCH_EN;
  1025. anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
  1026. sizeof(*anand->errloc), GFP_KERNEL);
  1027. if (!anand->errloc)
  1028. return -ENOMEM;
  1029. anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
  1030. if (!anand->hw_ecc)
  1031. return -ENOMEM;
  1032. /* Enforce bit swapping to fit the hardware */
  1033. anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
  1034. if (!anand->bch)
  1035. return -EINVAL;
  1036. ecc->read_page = anfc_sel_read_page_hw_ecc;
  1037. ecc->write_page = anfc_sel_write_page_hw_ecc;
  1038. return 0;
  1039. }
  1040. static int anfc_attach_chip(struct nand_chip *chip)
  1041. {
  1042. struct anand *anand = to_anand(chip);
  1043. struct arasan_nfc *nfc = to_anfc(chip->controller);
  1044. struct mtd_info *mtd = nand_to_mtd(chip);
  1045. int ret = 0;
  1046. if (mtd->writesize <= SZ_512)
  1047. anand->caddr_cycles = 1;
  1048. else
  1049. anand->caddr_cycles = 2;
  1050. if (chip->options & NAND_ROW_ADDR_3)
  1051. anand->raddr_cycles = 3;
  1052. else
  1053. anand->raddr_cycles = 2;
  1054. switch (mtd->writesize) {
  1055. case 512:
  1056. anand->page_sz = 0;
  1057. break;
  1058. case 1024:
  1059. anand->page_sz = 5;
  1060. break;
  1061. case 2048:
  1062. anand->page_sz = 1;
  1063. break;
  1064. case 4096:
  1065. anand->page_sz = 2;
  1066. break;
  1067. case 8192:
  1068. anand->page_sz = 3;
  1069. break;
  1070. case 16384:
  1071. anand->page_sz = 4;
  1072. break;
  1073. default:
  1074. return -EINVAL;
  1075. }
  1076. /* These hooks are valid for all ECC providers */
  1077. chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
  1078. chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
  1079. switch (chip->ecc.engine_type) {
  1080. case NAND_ECC_ENGINE_TYPE_NONE:
  1081. case NAND_ECC_ENGINE_TYPE_SOFT:
  1082. case NAND_ECC_ENGINE_TYPE_ON_DIE:
  1083. break;
  1084. case NAND_ECC_ENGINE_TYPE_ON_HOST:
  1085. ret = anfc_init_hw_ecc_controller(nfc, chip);
  1086. break;
  1087. default:
  1088. dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
  1089. chip->ecc.engine_type);
  1090. return -EINVAL;
  1091. }
  1092. return ret;
  1093. }
/* Controller ->detach_chip() hook: release the soft BCH context, if any. */
static void anfc_detach_chip(struct nand_chip *chip)
{
	struct anand *anand = to_anand(chip);

	/* Only allocated when hardware ECC was configured */
	if (anand->bch)
		bch_free(anand->bch);
}
/* Hooks implementing the generic NAND controller interface */
static const struct nand_controller_ops anfc_ops = {
	.exec_op = anfc_exec_op,
	.setup_interface = anfc_setup_interface,
	.attach_chip = anfc_attach_chip,
	.detach_chip = anfc_detach_chip,
};
/*
 * Instantiate one NAND chip described by DT node @np: parse its
 * chip-select and Ready/Busy wiring, scan the device and register the
 * resulting MTD. On success the chip is appended to nfc->chips.
 */
static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
{
	struct anand *anand;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	int rb, ret, i;

	anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
	if (!anand)
		return -ENOMEM;

	/* Chip-select init */
	anand->ncs_idx = of_property_count_elems_of_size(np, "reg", sizeof(u32));
	if (anand->ncs_idx <= 0 || anand->ncs_idx > nfc->ncs) {
		dev_err(nfc->dev, "Invalid reg property\n");
		return -EINVAL;
	}

	anand->cs_idx = devm_kcalloc(nfc->dev, anand->ncs_idx,
				     sizeof(*anand->cs_idx), GFP_KERNEL);
	if (!anand->cs_idx)
		return -ENOMEM;

	/* Record each CS index listed in the "reg" property */
	for (i = 0; i < anand->ncs_idx; i++) {
		ret = of_property_read_u32_index(np, "reg", i,
						 &anand->cs_idx[i]);
		if (ret) {
			dev_err(nfc->dev, "invalid CS property: %d\n", ret);
			return ret;
		}
	}

	/* Ready-busy init */
	ret = of_property_read_u32(np, "nand-rb", &rb);
	if (ret)
		return ret;

	if (rb >= ANFC_MAX_CS) {
		dev_err(nfc->dev, "Wrong RB %d\n", rb);
		return -EINVAL;
	}

	anand->rb = rb;

	chip = &anand->chip;
	mtd = nand_to_mtd(chip);
	mtd->dev.parent = nfc->dev;
	chip->controller = &nfc->controller;
	chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
			NAND_USES_DMA;

	nand_set_flash_node(chip, np);
	/* mtd->name presumably comes from the DT "label" property — verify */
	if (!mtd->name) {
		dev_err(nfc->dev, "NAND label property is mandatory\n");
		return -EINVAL;
	}

	ret = nand_scan(chip, anand->ncs_idx);
	if (ret) {
		dev_err(nfc->dev, "Scan operation failed\n");
		return ret;
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&anand->node, &nfc->chips);

	return 0;
}
  1166. static void anfc_chips_cleanup(struct arasan_nfc *nfc)
  1167. {
  1168. struct anand *anand, *tmp;
  1169. struct nand_chip *chip;
  1170. int ret;
  1171. list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
  1172. chip = &anand->chip;
  1173. ret = mtd_device_unregister(nand_to_mtd(chip));
  1174. WARN_ON(ret);
  1175. nand_cleanup(chip);
  1176. list_del(&anand->node);
  1177. }
  1178. }
/*
 * Walk the controller's DT node and initialize every child NAND chip.
 * On the first failure, chips registered so far are torn down.
 */
static int anfc_chips_init(struct arasan_nfc *nfc)
{
	struct device_node *np = nfc->dev->of_node, *nand_np;
	int nchips = of_get_child_count(np);
	int ret;

	if (!nchips) {
		dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
			nchips);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = anfc_chip_init(nfc, nand_np);
		if (ret) {
			/* Breaking out: drop the iterator's node reference */
			of_node_put(nand_np);
			anfc_chips_cleanup(nfc);
			break;
		}
	}

	/* ret is always assigned: nchips was checked non-zero above */
	return ret;
}
/* Put the controller in a known state and invalidate the cached CS. */
static void anfc_reset(struct arasan_nfc *nfc)
{
	/* Disable interrupt signals */
	writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);

	/* Enable interrupt status */
	writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);

	nfc->cur_cs = -1;
}
  1207. static int anfc_parse_cs(struct arasan_nfc *nfc)
  1208. {
  1209. int ret;
  1210. /* Check the gpio-cs property */
  1211. ret = rawnand_dt_parse_gpio_cs(nfc->dev, &nfc->cs_array, &nfc->ncs);
  1212. if (ret)
  1213. return ret;
  1214. /*
  1215. * The controller native CS cannot be both disabled at the same time.
  1216. * Hence, only one native CS can be used if GPIO CS are needed, so that
  1217. * the other is selected when a non-native CS must be asserted (not
  1218. * wired physically or configured as GPIO instead of NAND CS). In this
  1219. * case, the "not" chosen CS is assigned to nfc->spare_cs and selected
  1220. * whenever a GPIO CS must be asserted.
  1221. */
  1222. if (nfc->cs_array && nfc->ncs > 2) {
  1223. if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
  1224. dev_err(nfc->dev,
  1225. "Assign a single native CS when using GPIOs\n");
  1226. return -EINVAL;
  1227. }
  1228. if (nfc->cs_array[0])
  1229. nfc->spare_cs = 0;
  1230. else
  1231. nfc->spare_cs = 1;
  1232. }
  1233. if (!nfc->cs_array) {
  1234. nfc->cs_array = anfc_default_cs_array;
  1235. nfc->ncs = ANFC_MAX_CS;
  1236. return 0;
  1237. }
  1238. return 0;
  1239. }
/*
 * Platform probe: map the registers, quiesce the controller, enable the
 * controller and bus clocks, set the DMA mask, then parse the CS wiring
 * and instantiate the child chips. Clocks are unwound via gotos.
 */
static int anfc_probe(struct platform_device *pdev)
{
	struct arasan_nfc *nfc;
	int ret;

	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = &pdev->dev;
	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &anfc_ops;
	INIT_LIST_HEAD(&nfc->chips);

	nfc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(nfc->base))
		return PTR_ERR(nfc->base);

	anfc_reset(nfc);

	nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
	if (IS_ERR(nfc->controller_clk))
		return PTR_ERR(nfc->controller_clk);

	nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(nfc->bus_clk))
		return PTR_ERR(nfc->bus_clk);

	ret = clk_prepare_enable(nfc->controller_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nfc->bus_clk);
	if (ret)
		goto disable_controller_clk;

	/* The controller handles 64-bit DMA addresses */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto disable_bus_clk;

	ret = anfc_parse_cs(nfc);
	if (ret)
		goto disable_bus_clk;

	ret = anfc_chips_init(nfc);
	if (ret)
		goto disable_bus_clk;

	platform_set_drvdata(pdev, nfc);

	return 0;

disable_bus_clk:
	clk_disable_unprepare(nfc->bus_clk);

disable_controller_clk:
	clk_disable_unprepare(nfc->controller_clk);

	return ret;
}
/* Platform remove: tear down the chips, then gate both clocks. */
static int anfc_remove(struct platform_device *pdev)
{
	struct arasan_nfc *nfc = platform_get_drvdata(pdev);

	anfc_chips_cleanup(nfc);

	clk_disable_unprepare(nfc->bus_clk);
	clk_disable_unprepare(nfc->controller_clk);

	return 0;
}
/* Matches both the Xilinx ZynqMP integration and the bare Arasan IP */
static const struct of_device_id anfc_ids[] = {
	{
		.compatible = "xlnx,zynqmp-nand-controller",
	},
	{
		.compatible = "arasan,nfc-v3p10",
	},
	{}
};
MODULE_DEVICE_TABLE(of, anfc_ids);
/* Platform driver glue and module metadata */
static struct platform_driver anfc_driver = {
	.driver = {
		.name = "arasan-nand-controller",
		.of_match_table = anfc_ids,
	},
	.probe = anfc_probe,
	.remove = anfc_remove,
};
module_platform_driver(anfc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Punnaiah Choudary Kalluri <[email protected]>");
MODULE_AUTHOR("Naga Sureshkumar Relli <[email protected]>");
MODULE_AUTHOR("Miquel Raynal <[email protected]>");
MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");