onenand_omap2.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <[email protected]> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>

#include <asm/mach/flash.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE (1024 * 5)
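
/*
 * Per-device state for one OneNAND chip wired to an OMAP2/3 GPMC chip-select.
 */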
struct omap2_onenand {
        struct platform_device *pdev;
        int gpmc_cs;
        unsigned long phys_base;
        struct gpio_desc *int_gpiod;
        struct mtd_info mtd;
        struct onenand_chip onenand;
        struct completion irq_done;
        struct completion dma_done;
        struct dma_chan *dma_chan;
};

static void omap2_onenand_dma_complete_func(void *completion)
{
        complete(completion);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
        struct omap2_onenand *c = dev_id;

        complete(&c->irq_done);

        return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
        return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
                             int reg)
{
        writew(value, c->onenand.base + reg);
}
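
/*
 * Program the OneNAND SYS_CFG1 register: burst read latency, burst length,
 * and whether reads and writes run in synchronous mode. The HF/VHF bits are
 * set for the higher latencies used at faster clocks.
 */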
static int omap2_onenand_set_cfg(struct omap2_onenand *c,
                                 bool sr, bool sw,
                                 int latency, int burst_len)
{
        unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;

        reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;

        switch (burst_len) {
        case 0:         /* continuous */
                break;
        case 4:
                reg |= ONENAND_SYS_CFG1_BL_4;
                break;
        case 8:
                reg |= ONENAND_SYS_CFG1_BL_8;
                break;
        case 16:
                reg |= ONENAND_SYS_CFG1_BL_16;
                break;
        case 32:
                reg |= ONENAND_SYS_CFG1_BL_32;
                break;
        default:
                return -EINVAL;
        }

        if (latency > 5)
                reg |= ONENAND_SYS_CFG1_HF;
        if (latency > 7)
                reg |= ONENAND_SYS_CFG1_VHF;
        if (sr)
                reg |= ONENAND_SYS_CFG1_SYNC_READ;
        if (sw)
                reg |= ONENAND_SYS_CFG1_SYNC_WRITE;

        write_reg(c, reg, ONENAND_REG_SYS_CFG1);

        return 0;
}
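
/*
 * Decode the device clock frequency (in MHz) from bits 7:4 of the OneNAND
 * version ID; unknown codes are rejected.
 */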
static int omap2_onenand_get_freq(int ver)
{
        switch ((ver >> 4) & 0xf) {
        case 0:
                return 40;
        case 1:
                return 54;
        case 2:
                return 66;
        case 3:
                return 83;
        case 4:
                return 104;
        }

        return -EINVAL;
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
        printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
               msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
                      unsigned int intr)
{
        printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
               "intr 0x%04x\n", msg, state, ctrl, intr);
}
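
/*
 * Wait for the current OneNAND operation to finish. Reset and erase phases
 * are first polled briefly with udelay(); other non-read states sleep on the
 * INT GPIO interrupt with a 20 ms timeout, while reads poll the interrupt
 * register with the interrupt pin disabled. Afterwards the interrupt,
 * controller and ECC status registers are checked for errors.
 */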
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        unsigned int intr = 0;
        unsigned int ctrl, ctrl_mask;
        unsigned long timeout;
        u32 syscfg;

        if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
            state == FL_VERIFYING_ERASE) {
                int i = 21;
                unsigned int intr_flags = ONENAND_INT_MASTER;

                switch (state) {
                case FL_RESETTING:
                        intr_flags |= ONENAND_INT_RESET;
                        break;
                case FL_PREPARING_ERASE:
                        intr_flags |= ONENAND_INT_ERASE;
                        break;
                case FL_VERIFYING_ERASE:
                        i = 101;
                        break;
                }

                while (--i) {
                        udelay(1);
                        intr = read_reg(c, ONENAND_REG_INTERRUPT);
                        if (intr & ONENAND_INT_MASTER)
                                break;
                }
                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                if (ctrl & ONENAND_CTRL_ERROR) {
                        wait_err("controller error", state, ctrl, intr);
                        return -EIO;
                }
                if ((intr & intr_flags) == intr_flags)
                        return 0;
                /* Continue in wait for interrupt branch */
        }

        if (state != FL_READING) {
                int result;

                /* Turn interrupts on */
                syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
                        syscfg |= ONENAND_SYS_CFG1_IOBE;
                        write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
                        /* Add a delay to let GPIO settle */
                        syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                }

                reinit_completion(&c->irq_done);
                result = gpiod_get_value(c->int_gpiod);
                if (result < 0) {
                        ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                        intr = read_reg(c, ONENAND_REG_INTERRUPT);
                        wait_err("gpio error", state, ctrl, intr);
                        return result;
                } else if (result == 0) {
                        int retry_cnt = 0;
retry:
                        if (!wait_for_completion_io_timeout(&c->irq_done,
                                                msecs_to_jiffies(20))) {
                                /* Timeout after 20ms */
                                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                                if (ctrl & ONENAND_CTRL_ONGO &&
                                    !this->ongoing) {
                                        /*
                                         * The operation seems to be still going
                                         * so give it some more time.
                                         */
                                        retry_cnt += 1;
                                        if (retry_cnt < 3)
                                                goto retry;
                                        intr = read_reg(c,
                                                        ONENAND_REG_INTERRUPT);
                                        wait_err("timeout", state, ctrl, intr);
                                        return -EIO;
                                }
                                intr = read_reg(c, ONENAND_REG_INTERRUPT);
                                if ((intr & ONENAND_INT_MASTER) == 0)
                                        wait_warn("timeout", state, ctrl, intr);
                        }
                }
        } else {
                int retry_cnt = 0;

                /* Turn interrupts off */
                syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                syscfg &= ~ONENAND_SYS_CFG1_IOBE;
                write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

                timeout = jiffies + msecs_to_jiffies(20);
                while (1) {
                        if (time_before(jiffies, timeout)) {
                                intr = read_reg(c, ONENAND_REG_INTERRUPT);
                                if (intr & ONENAND_INT_MASTER)
                                        break;
                        } else {
                                /* Timeout after 20ms */
                                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                                if (ctrl & ONENAND_CTRL_ONGO) {
                                        /*
                                         * The operation seems to be still going
                                         * so give it some more time.
                                         */
                                        retry_cnt += 1;
                                        if (retry_cnt < 3) {
                                                timeout = jiffies +
                                                          msecs_to_jiffies(20);
                                                continue;
                                        }
                                }
                                break;
                        }
                }
        }

        intr = read_reg(c, ONENAND_REG_INTERRUPT);
        ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

        if (intr & ONENAND_INT_READ) {
                int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

                if (ecc) {
                        unsigned int addr1, addr8;

                        addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
                        addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
                        if (ecc & ONENAND_ECC_2BIT_ALL) {
                                printk(KERN_ERR "onenand_wait: ECC error = "
                                       "0x%04x, addr1 %#x, addr8 %#x\n",
                                       ecc, addr1, addr8);
                                mtd->ecc_stats.failed++;
                                return -EBADMSG;
                        } else if (ecc & ONENAND_ECC_1BIT_ALL) {
                                printk(KERN_NOTICE "onenand_wait: correctable "
                                       "ECC error = 0x%04x, addr1 %#x, "
                                       "addr8 %#x\n", ecc, addr1, addr8);
                                mtd->ecc_stats.corrected++;
                        }
                }
        } else if (state == FL_READING) {
                wait_err("timeout", state, ctrl, intr);
                return -EIO;
        }

        if (ctrl & ONENAND_CTRL_ERROR) {
                wait_err("controller error", state, ctrl, intr);
                if (ctrl & ONENAND_CTRL_LOCK)
                        printk(KERN_ERR "onenand_wait: "
                                        "Device is write protected!!!\n");
                return -EIO;
        }
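
        /*
         * Warn about any unexpected controller status bits. The ONGO bit
         * (0x8000) is tolerated while an operation is intentionally left
         * running (this->ongoing).
         */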
        ctrl_mask = 0xFE9F;
        if (this->ongoing)
                ctrl_mask &= ~0x8000;

        if (ctrl & ctrl_mask)
                wait_warn("unexpected controller status", state, ctrl, intr);

        return 0;
}
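
/*
 * BufferRAM 1 immediately follows BufferRAM 0, so when it is the currently
 * selected buffer the data area is offset by one writesize and the spare
 * area by one oobsize.
 */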
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
        struct onenand_chip *this = mtd->priv;

        if (ONENAND_CURRENT_BUFFERRAM(this)) {
                if (area == ONENAND_DATARAM)
                        return this->writesize;
                if (area == ONENAND_SPARERAM)
                        return mtd->oobsize;
        }

        return 0;
}
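
/*
 * Run a single memcpy DMA descriptor between the OneNAND BufferRAM and a
 * kernel buffer and sleep until its completion callback fires. The channel
 * is terminated if the transfer does not finish within 20 ms.
 */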
static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
                                             dma_addr_t src, dma_addr_t dst,
                                             size_t count)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
                                       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
                return -EIO;
        }

        reinit_completion(&c->dma_done);

        tx->callback = omap2_onenand_dma_complete_func;
        tx->callback_param = &c->dma_done;

        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
                return -EIO;
        }

        dma_async_issue_pending(c->dma_chan);

        if (!wait_for_completion_io_timeout(&c->dma_done,
                                            msecs_to_jiffies(20))) {
                dmaengine_terminate_sync(c->dma_chan);
                return -ETIMEDOUT;
        }

        return 0;
}

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                        unsigned char *buffer, int offset,
                                        size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        struct device *dev = &c->pdev->dev;
        void *buf = (void *)buffer;
        dma_addr_t dma_src, dma_dst;
        int bram_offset, err;
        size_t xtra;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        /*
         * If the buffer address is not DMA-able, the length is too short to
         * make DMA worthwhile, or we were invoked from panic_write(), fall
         * back to PIO mode.
         */
        if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
            count < 384 || mtd->oops_panic_write)
                goto out_copy;
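
        /*
         * Copy any unaligned tail bytes by CPU so that the DMA transfer
         * length stays a multiple of four.
         */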
        xtra = count & 3;
        if (xtra) {
                count -= xtra;
                memcpy(buf + count, this->base + bram_offset + count, xtra);
        }

        dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
        dma_src = c->phys_base + bram_offset;
        if (dma_mapping_error(dev, dma_dst)) {
                dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
                goto out_copy;
        }

        err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
        dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
        if (!err)
                return 0;

        dev_err(dev, "timeout waiting for DMA\n");

out_copy:
        memcpy(buf, this->base + bram_offset, count);
        return 0;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                         const unsigned char *buffer,
                                         int offset, size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        struct device *dev = &c->pdev->dev;
        void *buf = (void *)buffer;
        dma_addr_t dma_src, dma_dst;
        int bram_offset, err;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        /*
         * If the buffer address is not DMA-able, the length is too short to
         * make DMA worthwhile, or we were invoked from panic_write(), fall
         * back to PIO mode.
         */
        if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
            count < 384 || mtd->oops_panic_write)
                goto out_copy;

        dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
        dma_dst = c->phys_base + bram_offset;
        if (dma_mapping_error(dev, dma_src)) {
                dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
                goto out_copy;
        }

        err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
        dma_unmap_page(dev, dma_src, count, DMA_TO_DEVICE);
        if (!err)
                return 0;

        dev_err(dev, "timeout waiting for DMA\n");

out_copy:
        memcpy(this->base + bram_offset, buf, count);
        return 0;
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
        struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

        /* With certain content in the buffer RAM, the OMAP boot ROM code
         * can recognize the flash chip incorrectly. Zero it out before
         * soft reset.
         */
        memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

static int omap2_onenand_probe(struct platform_device *pdev)
{
        u32 val;
        dma_cap_mask_t mask;
        int freq, latency, r;
        struct resource *res;
        struct omap2_onenand *c;
        struct gpmc_onenand_info info;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "error getting memory resource\n");
                return -EINVAL;
        }

        r = of_property_read_u32(np, "reg", &val);
        if (r) {
                dev_err(dev, "reg not found in DT\n");
                return r;
        }

        c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        init_completion(&c->irq_done);
        init_completion(&c->dma_done);
        c->gpmc_cs = val;
        c->phys_base = res->start;

        c->onenand.base = devm_ioremap_resource(dev, res);
        if (IS_ERR(c->onenand.base))
                return PTR_ERR(c->onenand.base);

        c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
        if (IS_ERR(c->int_gpiod)) {
                /* Just try again if this happens */
                return dev_err_probe(dev, PTR_ERR(c->int_gpiod), "error getting gpio\n");
        }

        if (c->int_gpiod) {
                r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
                                     omap2_onenand_interrupt,
                                     IRQF_TRIGGER_RISING, "onenand", c);
                if (r)
                        return r;

                c->onenand.wait = omap2_onenand_wait;
        }
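
        /*
         * DMA is optional: without a memcpy-capable channel the generic
         * OneNAND PIO bufferram accessors are used instead.
         */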
        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        c->dma_chan = dma_request_channel(mask, NULL, NULL);
        if (c->dma_chan) {
                c->onenand.read_bufferram = omap2_onenand_read_bufferram;
                c->onenand.write_bufferram = omap2_onenand_write_bufferram;
        }

        c->pdev = pdev;
        c->mtd.priv = &c->onenand;
        c->mtd.dev.parent = dev;
        mtd_set_of_node(&c->mtd, dev->of_node);

        dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
                 c->gpmc_cs, c->phys_base, c->onenand.base,
                 c->dma_chan ? "DMA" : "PIO");

        r = onenand_scan(&c->mtd, 1);
        if (r < 0)
                goto err_release_dma;
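
        /*
         * If the chip reports a usable synchronous clock rate, derive a burst
         * read latency for it and reprogram both the GPMC timings and the
         * chip's SYS_CFG1 register to match.
         */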
        freq = omap2_onenand_get_freq(c->onenand.version_id);
        if (freq > 0) {
                switch (freq) {
                case 104:
                        latency = 7;
                        break;
                case 83:
                        latency = 6;
                        break;
                case 66:
                        latency = 5;
                        break;
                case 56:
                        latency = 4;
                        break;
                default:        /* 40 MHz or lower */
                        latency = 3;
                        break;
                }

                r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
                                                  freq, latency, &info);
                if (r)
                        goto err_release_onenand;

                r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
                                          latency, info.burst_len);
                if (r)
                        goto err_release_onenand;

                if (info.sync_read || info.sync_write)
                        dev_info(dev, "optimized timings for %d MHz\n", freq);
        }

        r = mtd_device_register(&c->mtd, NULL, 0);
        if (r)
                goto err_release_onenand;

        platform_set_drvdata(pdev, c);

        return 0;

err_release_onenand:
        onenand_release(&c->mtd);
err_release_dma:
        if (c->dma_chan)
                dma_release_channel(c->dma_chan);

        return r;
}

static int omap2_onenand_remove(struct platform_device *pdev)
{
        struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

        onenand_release(&c->mtd);
        if (c->dma_chan)
                dma_release_channel(c->dma_chan);
        omap2_onenand_shutdown(pdev);

        return 0;
}

static const struct of_device_id omap2_onenand_id_table[] = {
        { .compatible = "ti,omap2-onenand", },
        {},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);

static struct platform_driver omap2_onenand_driver = {
        .probe = omap2_onenand_probe,
        .remove = omap2_onenand_remove,
        .shutdown = omap2_onenand_shutdown,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = omap2_onenand_id_table,
        },
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <[email protected]>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");