denali.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * NAND Flash Controller Device Driver
  4. * Copyright © 2009-2010, Intel Corporation and its suppliers.
  5. *
  6. * Copyright (c) 2017-2019 Socionext Inc.
  7. * Reworked by Masahiro Yamada <[email protected]>
  8. */
  9. #include <linux/bitfield.h>
  10. #include <linux/completion.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/io.h>
  14. #include <linux/module.h>
  15. #include <linux/mtd/mtd.h>
  16. #include <linux/mtd/rawnand.h>
  17. #include <linux/slab.h>
  18. #include <linux/spinlock.h>
  19. #include "denali.h"
  20. #define DENALI_NAND_NAME "denali-nand"
  21. /* for Indexed Addressing */
  22. #define DENALI_INDEXED_CTRL 0x00
  23. #define DENALI_INDEXED_DATA 0x10
  24. #define DENALI_MAP00 (0 << 26) /* direct access to buffer */
  25. #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
  26. #define DENALI_MAP10 (2 << 26) /* high-level control plane */
  27. #define DENALI_MAP11 (3 << 26) /* direct controller access */
  28. /* MAP11 access cycle type */
  29. #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
  30. #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
  31. #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
  32. #define DENALI_BANK(denali) ((denali)->active_bank << 24)
  33. #define DENALI_INVALID_BANK -1
  34. static struct denali_chip *to_denali_chip(struct nand_chip *chip)
  35. {
  36. return container_of(chip, struct denali_chip, chip);
  37. }
  38. static struct denali_controller *to_denali_controller(struct nand_chip *chip)
  39. {
  40. return container_of(chip->controller, struct denali_controller,
  41. controller);
  42. }
  43. /*
  44. * Direct Addressing - the slave address forms the control information (command
  45. * type, bank, block, and page address). The slave data is the actual data to
  46. * be transferred. This mode requires 28 bits of address region allocated.
  47. */
  48. static u32 denali_direct_read(struct denali_controller *denali, u32 addr)
  49. {
  50. return ioread32(denali->host + addr);
  51. }
  52. static void denali_direct_write(struct denali_controller *denali, u32 addr,
  53. u32 data)
  54. {
  55. iowrite32(data, denali->host + addr);
  56. }
  57. /*
  58. * Indexed Addressing - address translation module intervenes in passing the
  59. * control information. This mode reduces the required address range. The
  60. * control information and transferred data are latched by the registers in
  61. * the translation module.
  62. */
  63. static u32 denali_indexed_read(struct denali_controller *denali, u32 addr)
  64. {
  65. iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
  66. return ioread32(denali->host + DENALI_INDEXED_DATA);
  67. }
  68. static void denali_indexed_write(struct denali_controller *denali, u32 addr,
  69. u32 data)
  70. {
  71. iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
  72. iowrite32(data, denali->host + DENALI_INDEXED_DATA);
  73. }
  74. static void denali_enable_irq(struct denali_controller *denali)
  75. {
  76. int i;
  77. for (i = 0; i < denali->nbanks; i++)
  78. iowrite32(U32_MAX, denali->reg + INTR_EN(i));
  79. iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
  80. }
  81. static void denali_disable_irq(struct denali_controller *denali)
  82. {
  83. int i;
  84. for (i = 0; i < denali->nbanks; i++)
  85. iowrite32(0, denali->reg + INTR_EN(i));
  86. iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
  87. }
  88. static void denali_clear_irq(struct denali_controller *denali,
  89. int bank, u32 irq_status)
  90. {
  91. /* write one to clear bits */
  92. iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
  93. }
  94. static void denali_clear_irq_all(struct denali_controller *denali)
  95. {
  96. int i;
  97. for (i = 0; i < denali->nbanks; i++)
  98. denali_clear_irq(denali, i, U32_MAX);
  99. }
  100. static irqreturn_t denali_isr(int irq, void *dev_id)
  101. {
  102. struct denali_controller *denali = dev_id;
  103. irqreturn_t ret = IRQ_NONE;
  104. u32 irq_status;
  105. int i;
  106. spin_lock(&denali->irq_lock);
  107. for (i = 0; i < denali->nbanks; i++) {
  108. irq_status = ioread32(denali->reg + INTR_STATUS(i));
  109. if (irq_status)
  110. ret = IRQ_HANDLED;
  111. denali_clear_irq(denali, i, irq_status);
  112. if (i != denali->active_bank)
  113. continue;
  114. denali->irq_status |= irq_status;
  115. if (denali->irq_status & denali->irq_mask)
  116. complete(&denali->complete);
  117. }
  118. spin_unlock(&denali->irq_lock);
  119. return ret;
  120. }
  121. static void denali_reset_irq(struct denali_controller *denali)
  122. {
  123. unsigned long flags;
  124. spin_lock_irqsave(&denali->irq_lock, flags);
  125. denali->irq_status = 0;
  126. denali->irq_mask = 0;
  127. spin_unlock_irqrestore(&denali->irq_lock, flags);
  128. }
  129. static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask)
  130. {
  131. unsigned long time_left, flags;
  132. u32 irq_status;
  133. spin_lock_irqsave(&denali->irq_lock, flags);
  134. irq_status = denali->irq_status;
  135. if (irq_mask & irq_status) {
  136. /* return immediately if the IRQ has already happened. */
  137. spin_unlock_irqrestore(&denali->irq_lock, flags);
  138. return irq_status;
  139. }
  140. denali->irq_mask = irq_mask;
  141. reinit_completion(&denali->complete);
  142. spin_unlock_irqrestore(&denali->irq_lock, flags);
  143. time_left = wait_for_completion_timeout(&denali->complete,
  144. msecs_to_jiffies(1000));
  145. if (!time_left) {
  146. dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
  147. irq_mask);
  148. return 0;
  149. }
  150. return denali->irq_status;
  151. }
  152. static void denali_select_target(struct nand_chip *chip, int cs)
  153. {
  154. struct denali_controller *denali = to_denali_controller(chip);
  155. struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
  156. struct mtd_info *mtd = nand_to_mtd(chip);
  157. denali->active_bank = sel->bank;
  158. iowrite32(1 << (chip->phys_erase_shift - chip->page_shift),
  159. denali->reg + PAGES_PER_BLOCK);
  160. iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
  161. denali->reg + DEVICE_WIDTH);
  162. iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
  163. iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
  164. iowrite32(chip->options & NAND_ROW_ADDR_3 ?
  165. 0 : TWO_ROW_ADDR_CYCLES__FLAG,
  166. denali->reg + TWO_ROW_ADDR_CYCLES);
  167. iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
  168. FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
  169. denali->reg + ECC_CORRECTION);
  170. iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
  171. iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
  172. iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS);
  173. if (chip->options & NAND_KEEP_TIMINGS)
  174. return;
  175. /* update timing registers unless NAND_KEEP_TIMINGS is set */
  176. iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE);
  177. iowrite32(sel->tcwaw_and_addr_2_data,
  178. denali->reg + TCWAW_AND_ADDR_2_DATA);
  179. iowrite32(sel->re_2_we, denali->reg + RE_2_WE);
  180. iowrite32(sel->acc_clks, denali->reg + ACC_CLKS);
  181. iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT);
  182. iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT);
  183. iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT);
  184. iowrite32(sel->re_2_re, denali->reg + RE_2_RE);
  185. }
  186. static int denali_change_column(struct nand_chip *chip, unsigned int offset,
  187. void *buf, unsigned int len, bool write)
  188. {
  189. if (write)
  190. return nand_change_write_column_op(chip, offset, buf, len,
  191. false);
  192. else
  193. return nand_change_read_column_op(chip, offset, buf, len,
  194. false);
  195. }
  196. static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
  197. {
  198. struct denali_controller *denali = to_denali_controller(chip);
  199. struct mtd_info *mtd = nand_to_mtd(chip);
  200. struct nand_ecc_ctrl *ecc = &chip->ecc;
  201. int writesize = mtd->writesize;
  202. int oob_skip = denali->oob_skip_bytes;
  203. int ret, i, pos, len;
  204. for (i = 0; i < ecc->steps; i++) {
  205. pos = i * (ecc->size + ecc->bytes);
  206. len = ecc->size;
  207. if (pos >= writesize) {
  208. pos += oob_skip;
  209. } else if (pos + len > writesize) {
  210. /* This chunk overwraps the BBM area. Must be split */
  211. ret = denali_change_column(chip, pos, buf,
  212. writesize - pos, write);
  213. if (ret)
  214. return ret;
  215. buf += writesize - pos;
  216. len -= writesize - pos;
  217. pos = writesize + oob_skip;
  218. }
  219. ret = denali_change_column(chip, pos, buf, len, write);
  220. if (ret)
  221. return ret;
  222. buf += len;
  223. }
  224. return 0;
  225. }
  226. static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
  227. {
  228. struct denali_controller *denali = to_denali_controller(chip);
  229. struct mtd_info *mtd = nand_to_mtd(chip);
  230. struct nand_ecc_ctrl *ecc = &chip->ecc;
  231. int writesize = mtd->writesize;
  232. int oobsize = mtd->oobsize;
  233. int oob_skip = denali->oob_skip_bytes;
  234. int ret, i, pos, len;
  235. /* BBM at the beginning of the OOB area */
  236. ret = denali_change_column(chip, writesize, buf, oob_skip, write);
  237. if (ret)
  238. return ret;
  239. buf += oob_skip;
  240. for (i = 0; i < ecc->steps; i++) {
  241. pos = ecc->size + i * (ecc->size + ecc->bytes);
  242. if (i == ecc->steps - 1)
  243. /* The last chunk includes OOB free */
  244. len = writesize + oobsize - pos - oob_skip;
  245. else
  246. len = ecc->bytes;
  247. if (pos >= writesize) {
  248. pos += oob_skip;
  249. } else if (pos + len > writesize) {
  250. /* This chunk overwraps the BBM area. Must be split */
  251. ret = denali_change_column(chip, pos, buf,
  252. writesize - pos, write);
  253. if (ret)
  254. return ret;
  255. buf += writesize - pos;
  256. len -= writesize - pos;
  257. pos = writesize + oob_skip;
  258. }
  259. ret = denali_change_column(chip, pos, buf, len, write);
  260. if (ret)
  261. return ret;
  262. buf += len;
  263. }
  264. return 0;
  265. }
  266. static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
  267. int page)
  268. {
  269. int ret;
  270. if (!buf && !oob_buf)
  271. return -EINVAL;
  272. ret = nand_read_page_op(chip, page, 0, NULL, 0);
  273. if (ret)
  274. return ret;
  275. if (buf) {
  276. ret = denali_payload_xfer(chip, buf, false);
  277. if (ret)
  278. return ret;
  279. }
  280. if (oob_buf) {
  281. ret = denali_oob_xfer(chip, oob_buf, false);
  282. if (ret)
  283. return ret;
  284. }
  285. return 0;
  286. }
  287. static int denali_write_raw(struct nand_chip *chip, const void *buf,
  288. const void *oob_buf, int page)
  289. {
  290. int ret;
  291. if (!buf && !oob_buf)
  292. return -EINVAL;
  293. ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
  294. if (ret)
  295. return ret;
  296. if (buf) {
  297. ret = denali_payload_xfer(chip, (void *)buf, true);
  298. if (ret)
  299. return ret;
  300. }
  301. if (oob_buf) {
  302. ret = denali_oob_xfer(chip, (void *)oob_buf, true);
  303. if (ret)
  304. return ret;
  305. }
  306. return nand_prog_page_end_op(chip);
  307. }
  308. static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
  309. int oob_required, int page)
  310. {
  311. return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
  312. page);
  313. }
  314. static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
  315. int oob_required, int page)
  316. {
  317. return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
  318. page);
  319. }
  320. static int denali_read_oob(struct nand_chip *chip, int page)
  321. {
  322. return denali_read_raw(chip, NULL, chip->oob_poi, page);
  323. }
  324. static int denali_write_oob(struct nand_chip *chip, int page)
  325. {
  326. return denali_write_raw(chip, NULL, chip->oob_poi, page);
  327. }
  328. static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
  329. unsigned long uncor_ecc_flags,
  330. unsigned int max_bitflips)
  331. {
  332. struct denali_controller *denali = to_denali_controller(chip);
  333. struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
  334. struct nand_ecc_ctrl *ecc = &chip->ecc;
  335. u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
  336. int i, stat;
  337. for (i = 0; i < ecc->steps; i++) {
  338. if (!(uncor_ecc_flags & BIT(i)))
  339. continue;
  340. stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code,
  341. ecc->bytes, NULL, 0,
  342. ecc->strength);
  343. if (stat < 0) {
  344. ecc_stats->failed++;
  345. } else {
  346. ecc_stats->corrected += stat;
  347. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  348. }
  349. buf += ecc->size;
  350. ecc_code += ecc->bytes;
  351. }
  352. return max_bitflips;
  353. }
  354. static int denali_hw_ecc_fixup(struct nand_chip *chip,
  355. unsigned long *uncor_ecc_flags)
  356. {
  357. struct denali_controller *denali = to_denali_controller(chip);
  358. struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
  359. int bank = denali->active_bank;
  360. u32 ecc_cor;
  361. unsigned int max_bitflips;
  362. ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
  363. ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
  364. if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
  365. /*
  366. * This flag is set when uncorrectable error occurs at least in
  367. * one ECC sector. We can not know "how many sectors", or
  368. * "which sector(s)". We need erase-page check for all sectors.
  369. */
  370. *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
  371. return 0;
  372. }
  373. max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
  374. /*
  375. * The register holds the maximum of per-sector corrected bitflips.
  376. * This is suitable for the return value of the ->read_page() callback.
  377. * Unfortunately, we can not know the total number of corrected bits in
  378. * the page. Increase the stats by max_bitflips. (compromised solution)
  379. */
  380. ecc_stats->corrected += max_bitflips;
  381. return max_bitflips;
  382. }
  383. static int denali_sw_ecc_fixup(struct nand_chip *chip,
  384. unsigned long *uncor_ecc_flags, u8 *buf)
  385. {
  386. struct denali_controller *denali = to_denali_controller(chip);
  387. struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
  388. unsigned int ecc_size = chip->ecc.size;
  389. unsigned int bitflips = 0;
  390. unsigned int max_bitflips = 0;
  391. u32 err_addr, err_cor_info;
  392. unsigned int err_byte, err_sector, err_device;
  393. u8 err_cor_value;
  394. unsigned int prev_sector = 0;
  395. u32 irq_status;
  396. denali_reset_irq(denali);
  397. do {
  398. err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
  399. err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
  400. err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
  401. err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
  402. err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
  403. err_cor_info);
  404. err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
  405. err_cor_info);
  406. /* reset the bitflip counter when crossing ECC sector */
  407. if (err_sector != prev_sector)
  408. bitflips = 0;
  409. if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
  410. /*
  411. * Check later if this is a real ECC error, or
  412. * an erased sector.
  413. */
  414. *uncor_ecc_flags |= BIT(err_sector);
  415. } else if (err_byte < ecc_size) {
  416. /*
  417. * If err_byte is larger than ecc_size, means error
  418. * happened in OOB, so we ignore it. It's no need for
  419. * us to correct it err_device is represented the NAND
  420. * error bits are happened in if there are more than
  421. * one NAND connected.
  422. */
  423. int offset;
  424. unsigned int flips_in_byte;
  425. offset = (err_sector * ecc_size + err_byte) *
  426. denali->devs_per_cs + err_device;
  427. /* correct the ECC error */
  428. flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
  429. buf[offset] ^= err_cor_value;
  430. ecc_stats->corrected += flips_in_byte;
  431. bitflips += flips_in_byte;
  432. max_bitflips = max(max_bitflips, bitflips);
  433. }
  434. prev_sector = err_sector;
  435. } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
  436. /*
  437. * Once handle all ECC errors, controller will trigger an
  438. * ECC_TRANSACTION_DONE interrupt.
  439. */
  440. irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
  441. if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
  442. return -EIO;
  443. return max_bitflips;
  444. }
  445. static void denali_setup_dma64(struct denali_controller *denali,
  446. dma_addr_t dma_addr, int page, bool write)
  447. {
  448. u32 mode;
  449. const int page_count = 1;
  450. mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
  451. /* DMA is a three step process */
  452. /*
  453. * 1. setup transfer type, interrupt when complete,
  454. * burst len = 64 bytes, the number of pages
  455. */
  456. denali->host_write(denali, mode,
  457. 0x01002000 | (64 << 16) |
  458. (write ? BIT(8) : 0) | page_count);
  459. /* 2. set memory low address */
  460. denali->host_write(denali, mode, lower_32_bits(dma_addr));
  461. /* 3. set memory high address */
  462. denali->host_write(denali, mode, upper_32_bits(dma_addr));
  463. }
  464. static void denali_setup_dma32(struct denali_controller *denali,
  465. dma_addr_t dma_addr, int page, bool write)
  466. {
  467. u32 mode;
  468. const int page_count = 1;
  469. mode = DENALI_MAP10 | DENALI_BANK(denali);
  470. /* DMA is a four step process */
  471. /* 1. setup transfer type and # of pages */
  472. denali->host_write(denali, mode | page,
  473. 0x2000 | (write ? BIT(8) : 0) | page_count);
  474. /* 2. set memory high address bits 23:8 */
  475. denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
  476. /* 3. set memory low address bits 23:8 */
  477. denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
  478. /* 4. interrupt when complete, burst len = 64 bytes */
  479. denali->host_write(denali, mode | 0x14000, 0x2400);
  480. }
  481. static int denali_pio_read(struct denali_controller *denali, u32 *buf,
  482. size_t size, int page)
  483. {
  484. u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
  485. u32 irq_status, ecc_err_mask;
  486. int i;
  487. if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
  488. ecc_err_mask = INTR__ECC_UNCOR_ERR;
  489. else
  490. ecc_err_mask = INTR__ECC_ERR;
  491. denali_reset_irq(denali);
  492. for (i = 0; i < size / 4; i++)
  493. buf[i] = denali->host_read(denali, addr);
  494. irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
  495. if (!(irq_status & INTR__PAGE_XFER_INC))
  496. return -EIO;
  497. if (irq_status & INTR__ERASED_PAGE)
  498. memset(buf, 0xff, size);
  499. return irq_status & ecc_err_mask ? -EBADMSG : 0;
  500. }
  501. static int denali_pio_write(struct denali_controller *denali, const u32 *buf,
  502. size_t size, int page)
  503. {
  504. u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
  505. u32 irq_status;
  506. int i;
  507. denali_reset_irq(denali);
  508. for (i = 0; i < size / 4; i++)
  509. denali->host_write(denali, addr, buf[i]);
  510. irq_status = denali_wait_for_irq(denali,
  511. INTR__PROGRAM_COMP |
  512. INTR__PROGRAM_FAIL);
  513. if (!(irq_status & INTR__PROGRAM_COMP))
  514. return -EIO;
  515. return 0;
  516. }
  517. static int denali_pio_xfer(struct denali_controller *denali, void *buf,
  518. size_t size, int page, bool write)
  519. {
  520. if (write)
  521. return denali_pio_write(denali, buf, size, page);
  522. else
  523. return denali_pio_read(denali, buf, size, page);
  524. }
  525. static int denali_dma_xfer(struct denali_controller *denali, void *buf,
  526. size_t size, int page, bool write)
  527. {
  528. dma_addr_t dma_addr;
  529. u32 irq_mask, irq_status, ecc_err_mask;
  530. enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
  531. int ret = 0;
  532. dma_addr = dma_map_single(denali->dev, buf, size, dir);
  533. if (dma_mapping_error(denali->dev, dma_addr)) {
  534. dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
  535. return denali_pio_xfer(denali, buf, size, page, write);
  536. }
  537. if (write) {
  538. /*
  539. * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
  540. * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
  541. * when the page program is completed.
  542. */
  543. irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
  544. ecc_err_mask = 0;
  545. } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
  546. irq_mask = INTR__DMA_CMD_COMP;
  547. ecc_err_mask = INTR__ECC_UNCOR_ERR;
  548. } else {
  549. irq_mask = INTR__DMA_CMD_COMP;
  550. ecc_err_mask = INTR__ECC_ERR;
  551. }
  552. iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
  553. /*
  554. * The ->setup_dma() hook kicks DMA by using the data/command
  555. * interface, which belongs to a different AXI port from the
  556. * register interface. Read back the register to avoid a race.
  557. */
  558. ioread32(denali->reg + DMA_ENABLE);
  559. denali_reset_irq(denali);
  560. denali->setup_dma(denali, dma_addr, page, write);
  561. irq_status = denali_wait_for_irq(denali, irq_mask);
  562. if (!(irq_status & INTR__DMA_CMD_COMP))
  563. ret = -EIO;
  564. else if (irq_status & ecc_err_mask)
  565. ret = -EBADMSG;
  566. iowrite32(0, denali->reg + DMA_ENABLE);
  567. dma_unmap_single(denali->dev, dma_addr, size, dir);
  568. if (irq_status & INTR__ERASED_PAGE)
  569. memset(buf, 0xff, size);
  570. return ret;
  571. }
  572. static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
  573. int page, bool write)
  574. {
  575. struct denali_controller *denali = to_denali_controller(chip);
  576. denali_select_target(chip, chip->cur_cs);
  577. if (denali->dma_avail)
  578. return denali_dma_xfer(denali, buf, size, page, write);
  579. else
  580. return denali_pio_xfer(denali, buf, size, page, write);
  581. }
  582. static int denali_read_page(struct nand_chip *chip, u8 *buf,
  583. int oob_required, int page)
  584. {
  585. struct denali_controller *denali = to_denali_controller(chip);
  586. struct mtd_info *mtd = nand_to_mtd(chip);
  587. unsigned long uncor_ecc_flags = 0;
  588. int stat = 0;
  589. int ret;
  590. ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
  591. if (ret && ret != -EBADMSG)
  592. return ret;
  593. if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
  594. stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
  595. else if (ret == -EBADMSG)
  596. stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);
  597. if (stat < 0)
  598. return stat;
  599. if (uncor_ecc_flags) {
  600. ret = denali_read_oob(chip, page);
  601. if (ret)
  602. return ret;
  603. stat = denali_check_erased_page(chip, buf,
  604. uncor_ecc_flags, stat);
  605. }
  606. return stat;
  607. }
  608. static int denali_write_page(struct nand_chip *chip, const u8 *buf,
  609. int oob_required, int page)
  610. {
  611. struct mtd_info *mtd = nand_to_mtd(chip);
  612. return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
  613. }
  614. static int denali_setup_interface(struct nand_chip *chip, int chipnr,
  615. const struct nand_interface_config *conf)
  616. {
  617. static const unsigned int data_setup_on_host = 10000;
  618. struct denali_controller *denali = to_denali_controller(chip);
  619. struct denali_chip_sel *sel;
  620. const struct nand_sdr_timings *timings;
  621. unsigned long t_x, mult_x;
  622. int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
  623. int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
  624. int addr_2_data_mask;
  625. u32 tmp;
  626. timings = nand_get_sdr_timings(conf);
  627. if (IS_ERR(timings))
  628. return PTR_ERR(timings);
  629. /* clk_x period in picoseconds */
  630. t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
  631. if (!t_x)
  632. return -EINVAL;
  633. /*
  634. * The bus interface clock, clk_x, is phase aligned with the core clock.
  635. * The clk_x is an integral multiple N of the core clk. The value N is
  636. * configured at IP delivery time, and its available value is 4, 5, 6.
  637. */
  638. mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
  639. if (mult_x < 4 || mult_x > 6)
  640. return -EINVAL;
  641. if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
  642. return 0;
  643. sel = &to_denali_chip(chip)->sels[chipnr];
  644. /* tRWH -> RE_2_WE */
  645. re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
  646. re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
  647. tmp = ioread32(denali->reg + RE_2_WE);
  648. tmp &= ~RE_2_WE__VALUE;
  649. tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
  650. sel->re_2_we = tmp;
  651. /* tRHZ -> RE_2_RE */
  652. re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
  653. re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
  654. tmp = ioread32(denali->reg + RE_2_RE);
  655. tmp &= ~RE_2_RE__VALUE;
  656. tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
  657. sel->re_2_re = tmp;
  658. /*
  659. * tCCS, tWHR -> WE_2_RE
  660. *
  661. * With WE_2_RE properly set, the Denali controller automatically takes
  662. * care of the delay; the driver need not set NAND_WAIT_TCCS.
  663. */
  664. we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
  665. we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
  666. tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
  667. tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
  668. tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
  669. sel->hwhr2_and_we_2_re = tmp;
  670. /* tADL -> ADDR_2_DATA */
  671. /* for older versions, ADDR_2_DATA is only 6 bit wide */
  672. addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
  673. if (denali->revision < 0x0501)
  674. addr_2_data_mask >>= 1;
  675. addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
  676. addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
  677. tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
  678. tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
  679. tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
  680. sel->tcwaw_and_addr_2_data = tmp;
  681. /* tREH, tWH -> RDWR_EN_HI_CNT */
  682. rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
  683. t_x);
  684. rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
  685. tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
  686. tmp &= ~RDWR_EN_HI_CNT__VALUE;
  687. tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
  688. sel->rdwr_en_hi_cnt = tmp;
  689. /*
  690. * tREA -> ACC_CLKS
  691. * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT
  692. */
  693. /*
  694. * Determine the minimum of acc_clks to meet the setup timing when
  695. * capturing the incoming data.
  696. *
  697. * The delay on the chip side is well-defined as tREA, but we need to
  698. * take additional delay into account. This includes a certain degree
  699. * of unknowledge, such as signal propagation delays on the PCB and
  700. * in the SoC, load capacity of the I/O pins, etc.
  701. */
  702. acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x);
  703. /* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */
  704. rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
  705. /* Extend rdwr_en_lo to meet the data hold timing */
  706. rdwr_en_lo = max_t(int, rdwr_en_lo,
  707. acc_clks - timings->tRHOH_min / t_x);
  708. /* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */
  709. rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
  710. t_x);
  711. rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
  712. rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
  713. /* Center the data latch timing for extra safety */
  714. acc_clks = (acc_clks + rdwr_en_lo +
  715. DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2;
  716. acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
  717. tmp = ioread32(denali->reg + ACC_CLKS);
  718. tmp &= ~ACC_CLKS__VALUE;
  719. tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
  720. sel->acc_clks = tmp;
  721. tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
  722. tmp &= ~RDWR_EN_LO_CNT__VALUE;
  723. tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
  724. sel->rdwr_en_lo_cnt = tmp;
  725. /* tCS, tCEA -> CS_SETUP_CNT */
  726. cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
  727. (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
  728. 0);
  729. cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
  730. tmp = ioread32(denali->reg + CS_SETUP_CNT);
  731. tmp &= ~CS_SETUP_CNT__VALUE;
  732. tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
  733. sel->cs_setup_cnt = tmp;
  734. return 0;
  735. }
  736. int denali_calc_ecc_bytes(int step_size, int strength)
  737. {
  738. /* BCH code. Denali requires ecc.bytes to be multiple of 2 */
  739. return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
  740. }
  741. EXPORT_SYMBOL(denali_calc_ecc_bytes);
  742. static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
  743. struct mtd_oob_region *oobregion)
  744. {
  745. struct nand_chip *chip = mtd_to_nand(mtd);
  746. struct denali_controller *denali = to_denali_controller(chip);
  747. if (section > 0)
  748. return -ERANGE;
  749. oobregion->offset = denali->oob_skip_bytes;
  750. oobregion->length = chip->ecc.total;
  751. return 0;
  752. }
  753. static int denali_ooblayout_free(struct mtd_info *mtd, int section,
  754. struct mtd_oob_region *oobregion)
  755. {
  756. struct nand_chip *chip = mtd_to_nand(mtd);
  757. struct denali_controller *denali = to_denali_controller(chip);
  758. if (section > 0)
  759. return -ERANGE;
  760. oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
  761. oobregion->length = mtd->oobsize - oobregion->offset;
  762. return 0;
  763. }
/* OOB layout seen by the MTD layer: [skip bytes][ECC bytes][free bytes] */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
/*
 * Adjust the chip geometry when two x8 devices share one chip select.
 *
 * The controller reports the arrangement via DEVICES_CONNECTED; the core
 * framework knows nothing about it, so every per-page quantity must be
 * doubled to present the _logical_ geometry.
 *
 * Returns 0 on success, -EINVAL for an unsupported device count.
 */
static int denali_multidev_fixup(struct nand_chip *chip)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	/* single device: nothing to fix up */
	if (denali->devs_per_cs == 1)
		return 0;

	/* only 1 or 2 devices per chip select are supported */
	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel: double all per-page sizes and shifts */
	memorg->pagesize <<= 1;
	memorg->oobsize <<= 1;
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
  815. static int denali_attach_chip(struct nand_chip *chip)
  816. {
  817. struct denali_controller *denali = to_denali_controller(chip);
  818. struct mtd_info *mtd = nand_to_mtd(chip);
  819. int ret;
  820. ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
  821. mtd->oobsize - denali->oob_skip_bytes);
  822. if (ret) {
  823. dev_err(denali->dev, "Failed to setup ECC settings.\n");
  824. return ret;
  825. }
  826. dev_dbg(denali->dev,
  827. "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
  828. chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
  829. ret = denali_multidev_fixup(chip);
  830. if (ret)
  831. return ret;
  832. return 0;
  833. }
  834. static void denali_exec_in8(struct denali_controller *denali, u32 type,
  835. u8 *buf, unsigned int len)
  836. {
  837. int i;
  838. for (i = 0; i < len; i++)
  839. buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
  840. }
  841. static void denali_exec_in16(struct denali_controller *denali, u32 type,
  842. u8 *buf, unsigned int len)
  843. {
  844. u32 data;
  845. int i;
  846. for (i = 0; i < len; i += 2) {
  847. data = denali->host_read(denali, type | DENALI_BANK(denali));
  848. /* bit 31:24 and 15:8 are used for DDR */
  849. buf[i] = data;
  850. buf[i + 1] = data >> 16;
  851. }
  852. }
  853. static void denali_exec_in(struct denali_controller *denali, u32 type,
  854. u8 *buf, unsigned int len, bool width16)
  855. {
  856. if (width16)
  857. denali_exec_in16(denali, type, buf, len);
  858. else
  859. denali_exec_in8(denali, type, buf, len);
  860. }
  861. static void denali_exec_out8(struct denali_controller *denali, u32 type,
  862. const u8 *buf, unsigned int len)
  863. {
  864. int i;
  865. for (i = 0; i < len; i++)
  866. denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
  867. }
  868. static void denali_exec_out16(struct denali_controller *denali, u32 type,
  869. const u8 *buf, unsigned int len)
  870. {
  871. int i;
  872. for (i = 0; i < len; i += 2)
  873. denali->host_write(denali, type | DENALI_BANK(denali),
  874. buf[i + 1] << 16 | buf[i]);
  875. }
  876. static void denali_exec_out(struct denali_controller *denali, u32 type,
  877. const u8 *buf, unsigned int len, bool width16)
  878. {
  879. if (width16)
  880. denali_exec_out16(denali, type, buf, len);
  881. else
  882. denali_exec_out8(denali, type, buf, len);
  883. }
  884. static int denali_exec_waitrdy(struct denali_controller *denali)
  885. {
  886. u32 irq_stat;
  887. /* R/B# pin transitioned from low to high? */
  888. irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);
  889. /* Just in case nand_operation has multiple NAND_OP_WAITRDY_INSTR. */
  890. denali_reset_irq(denali);
  891. return irq_stat & INTR__INT_ACT ? 0 : -EIO;
  892. }
  893. static int denali_exec_instr(struct nand_chip *chip,
  894. const struct nand_op_instr *instr)
  895. {
  896. struct denali_controller *denali = to_denali_controller(chip);
  897. switch (instr->type) {
  898. case NAND_OP_CMD_INSTR:
  899. denali_exec_out8(denali, DENALI_MAP11_CMD,
  900. &instr->ctx.cmd.opcode, 1);
  901. return 0;
  902. case NAND_OP_ADDR_INSTR:
  903. denali_exec_out8(denali, DENALI_MAP11_ADDR,
  904. instr->ctx.addr.addrs,
  905. instr->ctx.addr.naddrs);
  906. return 0;
  907. case NAND_OP_DATA_IN_INSTR:
  908. denali_exec_in(denali, DENALI_MAP11_DATA,
  909. instr->ctx.data.buf.in,
  910. instr->ctx.data.len,
  911. !instr->ctx.data.force_8bit &&
  912. chip->options & NAND_BUSWIDTH_16);
  913. return 0;
  914. case NAND_OP_DATA_OUT_INSTR:
  915. denali_exec_out(denali, DENALI_MAP11_DATA,
  916. instr->ctx.data.buf.out,
  917. instr->ctx.data.len,
  918. !instr->ctx.data.force_8bit &&
  919. chip->options & NAND_BUSWIDTH_16);
  920. return 0;
  921. case NAND_OP_WAITRDY_INSTR:
  922. return denali_exec_waitrdy(denali);
  923. default:
  924. WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
  925. instr->type);
  926. return -EINVAL;
  927. }
  928. }
  929. static int denali_exec_op(struct nand_chip *chip,
  930. const struct nand_operation *op, bool check_only)
  931. {
  932. int i, ret;
  933. if (check_only)
  934. return 0;
  935. denali_select_target(chip, op->cs);
  936. /*
  937. * Some commands contain NAND_OP_WAITRDY_INSTR.
  938. * irq must be cleared here to catch the R/B# interrupt there.
  939. */
  940. denali_reset_irq(to_denali_controller(chip));
  941. for (i = 0; i < op->ninstrs; i++) {
  942. ret = denali_exec_instr(chip, &op->instrs[i]);
  943. if (ret)
  944. return ret;
  945. }
  946. return 0;
  947. }
/* Hooks registered with the raw NAND core for this controller. */
static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.exec_op = denali_exec_op,
	.setup_interface = denali_setup_interface,
};
/*
 * Attach one chip (one or more chip selects) to an initialized Denali
 * controller: validate the requested bank numbers, configure the
 * nand_chip, run nand_scan(), and register the resulting MTD device.
 *
 * Returns 0 on success or a negative error code; on MTD registration
 * failure the scanned chip is cleaned up before returning.
 */
int denali_chip_init(struct denali_controller *denali,
		     struct denali_chip *dchip)
{
	struct nand_chip *chip = &dchip->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_chip *dchip2;
	int i, j, ret;

	chip->controller = &denali->controller;

	/* sanity checks for bank numbers */
	for (i = 0; i < dchip->nsels; i++) {
		unsigned int bank = dchip->sels[i].bank;

		/* bank must exist on this controller instance */
		if (bank >= denali->nbanks) {
			dev_err(denali->dev, "unsupported bank %d\n", bank);
			return -EINVAL;
		}

		/* no duplicate banks within this chip */
		for (j = 0; j < i; j++) {
			if (bank == dchip->sels[j].bank) {
				dev_err(denali->dev,
					"bank %d is assigned twice in the same chip\n",
					bank);
				return -EINVAL;
			}
		}

		/* no bank may already belong to a previously added chip */
		list_for_each_entry(dchip2, &denali->chips, node) {
			for (j = 0; j < dchip2->nsels; j++) {
				if (bank == dchip2->sels[j].bank) {
					dev_err(denali->dev,
						"bank %d is already used\n",
						bank);
					return -EINVAL;
				}
			}
		}
	}

	mtd->dev.parent = denali->dev;

	/*
	 * Fallback to the default name if DT did not give "label" property.
	 * Use "label" property if multiple chips are connected.
	 */
	if (!mtd->name && list_empty(&denali->chips))
		mtd->name = "denali-nand";

	if (denali->dma_avail) {
		chip->options |= NAND_USES_DMA;
		chip->buf_align = 16;	/* buffer alignment required for DMA */
	}

	/* clk rate info is needed for setup_interface */
	if (!denali->clk_rate || !denali->clk_x_rate)
		chip->options |= NAND_KEEP_TIMINGS;

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->options |= NAND_NO_SUBPAGE_WRITE;
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
	chip->ecc.read_page = denali_read_page;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	ret = nand_scan(chip, dchip->nsels);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	/* only track the chip once it is fully registered */
	list_add_tail(&dchip->node, &denali->chips);

	return 0;

cleanup_nand:
	nand_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL_GPL(denali_chip_init);
/*
 * One-time controller initialization: probe the FEATURES register,
 * set up DMA and host-access callbacks, program the base register
 * configuration, and install the interrupt handler.
 *
 * Returns 0 on success or a negative error code from IRQ registration.
 */
int denali_init(struct denali_controller *denali)
{
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	nand_controller_init(&denali->controller);
	denali->controller.ops = &denali_controller_ops;
	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);
	INIT_LIST_HEAD(&denali->chips);
	denali->active_bank = DENALI_INVALID_BANK;

	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->nbanks <<= 1;

	if (features & FEATURES__DMA)
		denali->dma_avail = true;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		/* fall back to PIO if the DMA mask cannot be satisfied */
		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = false;
		}
	}

	if (denali->dma_avail) {
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	/* choose indexed or direct host register access per the IP config */
	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/*
	 * Set how many bytes should be skipped before writing data in OOB.
	 * If a platform requests a non-zero value, set it to the register.
	 * Otherwise, read the value out, expecting it has already been set up
	 * by firmware.
	 */
	if (denali->oob_skip_bytes)
		iowrite32(denali->oob_skip_bytes,
			  denali->reg + SPARE_AREA_SKIP_BYTES);
	else
		denali->oob_skip_bytes = ioread32(denali->reg +
						  SPARE_AREA_SKIP_BYTES);

	/* baseline hardware configuration */
	iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
	iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
	iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
	iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);

	/* clear stale interrupts before the handler can be invoked */
	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);

	return 0;
}
EXPORT_SYMBOL(denali_init);
  1101. void denali_remove(struct denali_controller *denali)
  1102. {
  1103. struct denali_chip *dchip, *tmp;
  1104. struct nand_chip *chip;
  1105. int ret;
  1106. list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
  1107. chip = &dchip->chip;
  1108. ret = mtd_device_unregister(nand_to_mtd(chip));
  1109. WARN_ON(ret);
  1110. nand_cleanup(chip);
  1111. list_del(&dchip->node);
  1112. }
  1113. denali_disable_irq(denali);
  1114. }
  1115. EXPORT_SYMBOL(denali_remove);
  1116. MODULE_DESCRIPTION("Driver core for Denali NAND controller");
  1117. MODULE_AUTHOR("Intel Corporation and its suppliers");
  1118. MODULE_LICENSE("GPL v2");