// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NXP LPC32XX NAND SLC driver
 *
 * Authors:
 *    Kevin Wells <[email protected]>
 *    Roland Stigge <[email protected]>
 *
 * Copyright © 2011 NXP Semiconductors
 * Copyright © 2012 Roland Stigge
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_slc.h>

#define LPC32XX_MODNAME "lpc32xx-nand"

/**********************************************************************
* SLC NAND controller register offsets
**********************************************************************/
#define SLC_DATA(x)	(x + 0x000)
#define SLC_ADDR(x)	(x + 0x004)
#define SLC_CMD(x)	(x + 0x008)
#define SLC_STOP(x)	(x + 0x00C)
#define SLC_CTRL(x)	(x + 0x010)
#define SLC_CFG(x)	(x + 0x014)
#define SLC_STAT(x)	(x + 0x018)
#define SLC_INT_STAT(x)	(x + 0x01C)
#define SLC_IEN(x)	(x + 0x020)
#define SLC_ISR(x)	(x + 0x024)
#define SLC_ICR(x)	(x + 0x028)
#define SLC_TAC(x)	(x + 0x02C)
#define SLC_TC(x)	(x + 0x030)
#define SLC_ECC(x)	(x + 0x034)
#define SLC_DMA_DATA(x)	(x + 0x038)

/**********************************************************************
* slc_ctrl register definitions
**********************************************************************/
#define SLCCTRL_SW_RESET	(1 << 2) /* Reset the NAND controller bit */
#define SLCCTRL_ECC_CLEAR	(1 << 1) /* Reset ECC bit */
#define SLCCTRL_DMA_START	(1 << 0) /* Start DMA channel bit */

/**********************************************************************
* slc_cfg register definitions
**********************************************************************/
#define SLCCFG_CE_LOW		(1 << 5) /* Force CE low bit */
#define SLCCFG_DMA_ECC		(1 << 4) /* Enable DMA ECC bit */
#define SLCCFG_ECC_EN		(1 << 3) /* ECC enable bit */
#define SLCCFG_DMA_BURST	(1 << 2) /* DMA burst bit */
#define SLCCFG_DMA_DIR		(1 << 1) /* DMA write(0)/read(1) bit */
#define SLCCFG_WIDTH		(1 << 0) /* External device width, 0=8bit */

/**********************************************************************
* slc_stat register definitions
**********************************************************************/
#define SLCSTAT_DMA_FIFO	(1 << 2) /* DMA FIFO has data bit */
#define SLCSTAT_SLC_FIFO	(1 << 1) /* SLC FIFO has data bit */
#define SLCSTAT_NAND_READY	(1 << 0) /* NAND device is ready bit */

/**********************************************************************
* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
**********************************************************************/
#define SLCSTAT_INT_TC		(1 << 1) /* Transfer count bit */
#define SLCSTAT_INT_RDY_EN	(1 << 0) /* Ready interrupt bit */

/**********************************************************************
* slc_tac register definitions
**********************************************************************/
/* Compute a clock-cycle count from the controller and device clock rates */
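/*
 * SLCTAC_CLOCKS(c, n, s) evaluates to DIV_ROUND_UP(c, n) - 1, clamped to the
 * 4-bit field maximum of 0xF and shifted into bit position 's' of SLC_TAC.
 */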
#define SLCTAC_CLOCKS(c, n, s)	(min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)

/* Clock setting for RDY write sample wait time in 2*n clocks */
#define SLCTAC_WDR(n)		(((n) & 0xF) << 28)
/* Write pulse width in clock cycles, 1 to 16 clocks */
#define SLCTAC_WWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 24))
/* Write hold time of control and data signals, 1 to 16 clocks */
#define SLCTAC_WHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 20))
/* Write setup time of control and data signals, 1 to 16 clocks */
#define SLCTAC_WSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 16))
/* Clock setting for RDY read sample wait time in 2*n clocks */
#define SLCTAC_RDR(n)		(((n) & 0xF) << 12)
/* Read pulse width in clock cycles, 1 to 16 clocks */
#define SLCTAC_RWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 8))
/* Read hold time of control and data signals, 1 to 16 clocks */
#define SLCTAC_RHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 4))
/* Read setup time of control and data signals, 1 to 16 clocks */
#define SLCTAC_RSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 0))

/**********************************************************************
* slc_ecc register definitions
**********************************************************************/
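/*
 * The SLC_ECC register packs the 6-bit column parity in bits [5:0] and the
 * 15-bit line parity in bits [20:6]; the macros below extract each field.
 */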
/* ECC line parity fetch macro */
#define SLCECC_TO_LINEPAR(n)	(((n) >> 6) & 0x7FFF)
#define SLCECC_TO_COLPAR(n)	((n) & 0x3F)
/*
 * DMA requires storage space for the DMA local buffer and the hardware ECC
 * storage area. The DMA local buffer is only used if DMA mapping fails
 * during runtime.
 */
#define LPC32XX_DMA_DATA_SIZE		4096
#define LPC32XX_ECC_SAVE_SIZE		((4096 / 256) * 4)

/* Number of bytes used for ECC stored in NAND per 256 bytes */
#define LPC32XX_SLC_DEV_ECC_BYTES	3

/*
 * If the NAND base clock frequency can't be fetched, this frequency will be
 * used instead as the base. This rate is used to setup the timing registers
 * used for NAND accesses.
 */
#define LPC32XX_DEF_BUS_RATE		133250000

/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
#define LPC32XX_DMA_TIMEOUT		100

/*
 * NAND ECC Layout for small page NAND devices
 * Note: For large and huge page devices, the default layouts are used
 */
static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->length = 6;
	oobregion->offset = 10;

	return 0;
}

static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 4;
	} else {
		oobregion->offset = 6;
		oobregion->length = 4;
	}

	return 0;
}

static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
	.ecc = lpc32xx_ooblayout_ecc,
	.free = lpc32xx_ooblayout_free,
};

static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
/*
 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
 * Note: Large page devices use the default layout
 */
static struct nand_bbt_descr bbt_smallpage_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 0,
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 0,
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = mirror_pattern
};

/*
 * NAND platform configuration structure
 */
struct lpc32xx_nand_cfg_slc {
	uint32_t wdr_clks;
	uint32_t wwidth;
	uint32_t whold;
	uint32_t wsetup;
	uint32_t rdr_clks;
	uint32_t rwidth;
	uint32_t rhold;
	uint32_t rsetup;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned num_parts;
};

struct lpc32xx_nand_host {
	struct nand_chip nand_chip;
	struct lpc32xx_slc_platform_data *pdata;
	struct clk *clk;
	void __iomem *io_base;
	struct lpc32xx_nand_cfg_slc *ncfg;

	struct completion comp;
	struct dma_chan *dma_chan;
	uint32_t dma_buf_len;
	struct dma_slave_config dma_slave_config;
	struct scatterlist sgl;

	/*
	 * DMA and CPU addresses of ECC work area and data buffer
	 */
	uint32_t *ecc_buf;
	uint8_t *data_buf;
	dma_addr_t io_base_dma;
};

static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset SLC controller */
	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
	udelay(1000);

	/* Basic setup */
	writel(0, SLC_CFG(host->io_base));
	writel(0, SLC_IEN(host->io_base));
	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
	       SLC_ICR(host->io_base));

	/* Get base clock for SLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = LPC32XX_DEF_BUS_RATE;

	/* Compute clock setup values */
	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
	      SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
	      SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
	      SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
	      SLCTAC_RDR(host->ncfg->rdr_clks) |
	      SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
	      SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
	      SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
	writel(tmp, SLC_TAC(host->io_base));
}

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
				  unsigned int ctrl)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Does CE state need to be changed? */
	tmp = readl(SLC_CFG(host->io_base));
	if (ctrl & NAND_NCE)
		tmp |= SLCCFG_CE_LOW;
	else
		tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CFG(host->io_base));

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, SLC_CMD(host->io_base));
		else
			writel(cmd, SLC_ADDR(host->io_base));
	}
}

/*
 * Read the Device Ready pin
 */
static int lpc32xx_nand_device_ready(struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int rdy = 0;

	if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
		rdy = 1;

	return rdy;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}

/*
 * Prepares SLC for transfers with H/W ECC enabled
 */
static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
{
	/* Hardware ECC is enabled automatically in hardware as needed */
}

/*
 * Calculates the ECC for the data
 */
static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
				      const unsigned char *buf,
				      unsigned char *code)
{
	/*
	 * ECC is calculated automatically in hardware during syndrome read
	 * and write operations, so it doesn't need to be calculated here.
	 */
	return 0;
}

/*
 * Read a single byte from NAND device
 */
static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	return (uint8_t)readl(SLC_DATA(host->io_base));
}

/*
 * Simple device read without ECC
 */
static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Direct device read with no ECC */
	while (len-- > 0)
		*buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
}

/*
 * Simple device write without ECC
 */
static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
				   int len)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Direct device write with no ECC */
	while (len-- > 0)
		writel((uint32_t)*buf++, SLC_DATA(host->io_base));
}

/*
 * Read the OOB data from the device without ECC using FIFO method
 */
static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}

/*
 * Write the OOB data to the device without ECC using FIFO method
 */
static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}

/*
 * Fills in the ECC fields in the OOB buffer with the hardware generated ECC.
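 * Each 32-bit hardware ECC word is shifted left by two, inverted and masked
 * to 24 bits, then stored most-significant byte first, producing the three
 * ECC bytes per 256-byte step in the byte order stored on flash.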
 */
static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
{
	int i;

	for (i = 0; i < (count * 3); i += 3) {
		uint32_t ce = ecc[i / 3];
		ce = ~(ce << 2) & 0xFFFFFF;
		spare[i + 2] = (uint8_t)(ce & 0xFF);
		ce >>= 8;
		spare[i + 1] = (uint8_t)(ce & 0xFF);
		ce >>= 8;
		spare[i] = (uint8_t)(ce & 0xFF);
	}
}
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}

static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
			    void *mem, int len, enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	host->dma_slave_config.direction = dir;
	host->dma_slave_config.src_addr = dma;
	host->dma_slave_config.dst_addr = dma;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 4;
	host->dma_slave_config.dst_maxburst = 4;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		return -ENXIO;
	}

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}

	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);

	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}

/*
 * DMA read/write transfers with ECC support
 */
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
			int read)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, status = 0;
	unsigned long timeout;
	int res;
	enum dma_transfer_direction dir =
		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	uint8_t *dma_buf;
	bool dma_mapped;
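
	/*
	 * DMA straight from the caller's buffer when it lies in the kernel's
	 * direct (lowmem) mapping; otherwise bounce the transfer through the
	 * preallocated local data buffer.
	 */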
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->data_buf;
		dma_mapped = false;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	if (read) {
		writel(readl(SLC_CFG(host->io_base)) |
		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
	} else {
		writel((readl(SLC_CFG(host->io_base)) |
			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
		       ~SLCCFG_DMA_DIR,
		       SLC_CFG(host->io_base));
	}

	/* Clear initial ECC */
	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Transfer size is data area only */
	writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));

	for (i = 0; i < chip->ecc.steps; i++) {
		/* Data */
		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
				       dma_buf + i * chip->ecc.size,
				       mtd->writesize / chip->ecc.steps, dir);
		if (res)
			return res;

		/* Always _read_ ECC */
		if (i == chip->ecc.steps - 1)
			break;
		if (!read) /* ECC availability delayed on write */
			udelay(10);
		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
		if (res)
			return res;
	}

	/*
	 * According to NXP, the DMA can be finished here, but the NAND
	 * controller may still have buffered data. After porting to using the
	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
	 * appears to be always true, according to tests. Keeping the check for
	 * safety reasons for now.
	 */
	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
		       time_before(jiffies, timeout))
			cpu_relax();
		if (!time_before(jiffies, timeout)) {
			dev_err(mtd->dev.parent, "FIFO held data too long\n");
			status = -EIO;
		}
	}

	/* Read last calculated ECC value */
	if (!read)
		udelay(10);
	host->ecc_buf[chip->ecc.steps - 1] =
		readl(SLC_ECC(host->io_base));

	/* Flush DMA */
	dmaengine_terminate_all(host->dma_chan);

	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
	    readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	/* Stop DMA & HW ECC */
	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));
	writel(readl(SLC_CFG(host->io_base)) &
	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));

	if (!dma_mapped && read)
		memcpy(buf, host->data_buf, mtd->writesize);

	return status;
}
/*
 * Read the data and OOB data from the device, use ECC correction with the
 * data, disable ECC for the OOB data
 */
static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
					   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct mtd_oob_region oobregion = { };
	int stat, i, status, error;
	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];

	/* Issue read command */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* Read data and oob, calculate ECC */
	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);

	/* Get OOB data */
	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	/* Convert to stored ECC format */
	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);

	/* Pointer to ECC data retrieved from NAND spare area */
	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
	if (error)
		return error;

	oobecc = chip->oob_poi + oobregion.offset;

	for (i = 0; i < chip->ecc.steps; i++) {
		stat = chip->ecc.correct(chip, buf, oobecc,
					 &tmpecc[i * chip->ecc.bytes]);
		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;

		buf += chip->ecc.size;
		oobecc += chip->ecc.bytes;
	}

	return status;
}

/*
 * Read the data and OOB data from the device, no ECC correction with the
 * data or OOB data
 */
static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
					       uint8_t *buf, int oob_required,
					       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Issue read command */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* Raw reads can just use the FIFO interface */
	chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	return 0;
}

/*
 * Write the data and OOB data to the device, use ECC with the data,
 * disable ECC for the OOB data
 */
static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
					    const uint8_t *buf,
					    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct mtd_oob_region oobregion = { };
	uint8_t *pb;
	int error;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	/* Write data, calculate ECC on outbound data */
	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
	if (error)
		return error;

	/*
	 * The calculated ECC needs some manual work done to it before
	 * committing it to NAND. Process the calculated ECC and place
	 * the resultant values directly into the OOB buffer.
	 */
	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
	if (error)
		return error;

	pb = chip->oob_poi + oobregion.offset;
	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);

	/* Write ECC data to device */
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}

/*
 * Write the data and OOB data to the device, no ECC correction with the
 * data or OOB data
 */
static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
						const uint8_t *buf,
						int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Raw writes can just use the FIFO interface */
	nand_prog_page_begin_op(chip, page, 0, buf,
				chip->ecc.size * chip->ecc.steps);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}

static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-slc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	return 0;
}

static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_slc *ncfg;
	struct device_node *np = dev->of_node;

	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
	if (!ncfg)
		return NULL;

	of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
	of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
	of_property_read_u32(np, "nxp,whold", &ncfg->whold);
	of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
	of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
	of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
	of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
	of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);

	if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
	    !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
	    !ncfg->rhold || !ncfg->rsetup) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return ncfg;
}

static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
		return 0;

	/* OOB and ECC CPU and DMA work areas */
	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);

	/*
	 * Small page FLASH has a unique OOB layout, but large and huge
	 * page FLASH use the standard layout. Small page FLASH uses a
	 * custom BBT marker layout.
	 */
	if (mtd->writesize <= 512)
		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);

	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
	/* These sizes remain the same regardless of page size */
	chip->ecc.size = 256;
	chip->ecc.strength = 1;
	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
	chip->ecc.prepad = 0;
	chip->ecc.postpad = 0;
	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
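	/*
	 * The controller only generates the ECC; correction (one bit per
	 * 256-byte step) is done in software by the generic Hamming code.
	 */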
	chip->ecc.correct = rawnand_sw_hamming_correct;
	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;

	/*
	 * Use a custom BBT marker setup for small page FLASH that
	 * won't interfere with the ECC layout. Large and huge page
	 * FLASH use the standard layout.
	 */
	if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
	    mtd->writesize <= 512) {
		chip->bbt_td = &bbt_smallpage_main_descr;
		chip->bbt_md = &bbt_smallpage_mirror_descr;
	}

	return 0;
}

static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
	.attach_chip = lpc32xx_nand_attach_chip,
};

/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_dma = rc->start;
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
			host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	chip = &host->nand_chip;
	mtd = nand_to_mtd(chip);
	nand_set_controller_data(chip, host);
	nand_set_flash_node(chip, pdev->dev.of_node);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock failure\n");
		res = -ENOENT;
		goto enable_wp;
	}
	res = clk_prepare_enable(host->clk);
	if (res)
		goto enable_wp;

	/* Set NAND IO addresses and command/ready functions */
	chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
	chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
	chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	chip->legacy.dev_ready = lpc32xx_nand_device_ready;
	chip->legacy.chip_delay = 20;	/* 20us command delay time */

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* NAND callbacks for LPC32xx SLC hardware */
	chip->legacy.read_byte = lpc32xx_nand_read_byte;
	chip->legacy.read_buf = lpc32xx_nand_read_buf;
	chip->legacy.write_buf = lpc32xx_nand_write_buf;

	/*
	 * Allocate a large enough buffer for a single huge page plus
	 * extra space for the spare area and ECC storage area
	 */
	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
				      GFP_KERNEL);
	if (host->data_buf == NULL) {
		res = -ENOMEM;
		goto unprepare_clk;
	}

	res = lpc32xx_nand_dma_setup(host);
	if (res) {
		res = -EIO;
		goto unprepare_clk;
	}

	/* Find NAND device */
	chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
	res = nand_scan(chip, 1);
	if (res)
		goto release_dma;

	mtd->name = "nxp_lpc3220_slc";
	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (res)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(chip);
release_dma:
	dma_release_channel(host->dma_chan);
unprepare_clk:
	clk_disable_unprepare(host->clk);
enable_wp:
	lpc32xx_wp_enable(host);

	return res;
}
/*
 * Remove NAND device.
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->nand_chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	dma_release_channel(host->dma_chan);

	/* Force CE high */
	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	clk_disable_unprepare(host->clk);
	lpc32xx_wp_enable(host);

	return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	int ret;

	/* Re-enable NAND clock */
	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Force CE high */
	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable_unprepare(host->clk);

	return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-slc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= LPC32XX_MODNAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kevin Wells <[email protected]>");
MODULE_AUTHOR("Roland Stigge <[email protected]>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");