// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * MTK NAND Flash controller driver.
 * Copyright (C) 2016 MediaTek Inc.
 * Authors: Xiaolei Li <[email protected]>
 *          Jorge Ramirez-Ortiz <[email protected]>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mtd/nand-ecc-mtk.h>

/* NAND controller register definition */
#define NFI_CNFG                (0x00)
#define         CNFG_AHB                BIT(0)
#define         CNFG_READ_EN            BIT(1)
#define         CNFG_DMA_BURST_EN       BIT(2)
#define         CNFG_BYTE_RW            BIT(6)
#define         CNFG_HW_ECC_EN          BIT(8)
#define         CNFG_AUTO_FMT_EN        BIT(9)
#define         CNFG_OP_CUST            (6 << 12)
#define NFI_PAGEFMT             (0x04)
#define         PAGEFMT_FDM_ECC_SHIFT   (12)
#define         PAGEFMT_FDM_SHIFT       (8)
#define         PAGEFMT_SEC_SEL_512     BIT(2)
#define         PAGEFMT_512_2K          (0)
#define         PAGEFMT_2K_4K           (1)
#define         PAGEFMT_4K_8K           (2)
#define         PAGEFMT_8K_16K          (3)
/* NFI control */
#define NFI_CON                 (0x08)
#define         CON_FIFO_FLUSH          BIT(0)
#define         CON_NFI_RST             BIT(1)
#define         CON_BRD                 BIT(8)  /* burst read */
#define         CON_BWR                 BIT(9)  /* burst write */
#define         CON_SEC_SHIFT           (12)
/* Timing control register */
#define NFI_ACCCON              (0x0C)
#define NFI_INTR_EN             (0x10)
#define         INTR_AHB_DONE_EN        BIT(6)
#define NFI_INTR_STA            (0x14)
#define NFI_CMD                 (0x20)
#define NFI_ADDRNOB             (0x30)
#define NFI_COLADDR             (0x34)
#define NFI_ROWADDR             (0x38)
#define NFI_STRDATA             (0x40)
#define         STAR_EN                 (1)
#define         STAR_DE                 (0)
#define NFI_CNRNB               (0x44)
#define NFI_DATAW               (0x50)
#define NFI_DATAR               (0x54)
#define NFI_PIO_DIRDY           (0x58)
#define         PIO_DI_RDY              (0x01)
#define NFI_STA                 (0x60)
#define         STA_CMD                 BIT(0)
#define         STA_ADDR                BIT(1)
#define         STA_BUSY                BIT(8)
#define         STA_EMP_PAGE            BIT(12)
#define         NFI_FSM_CUSTDATA        (0xe << 16)
#define         NFI_FSM_MASK            (0xf << 16)
#define NFI_ADDRCNTR            (0x70)
#define         CNTR_MASK               GENMASK(16, 12)
#define         ADDRCNTR_SEC_SHIFT      (12)
#define         ADDRCNTR_SEC(val) \
                (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
#define NFI_STRADDR             (0x80)
#define NFI_BYTELEN             (0x84)
#define NFI_CSEL                (0x90)
#define NFI_FDML(x)             (0xA0 + (x) * sizeof(u32) * 2)
#define NFI_FDMM(x)             (0xA4 + (x) * sizeof(u32) * 2)
#define NFI_FDM_MAX_SIZE        (8)
#define NFI_FDM_MIN_SIZE        (1)
#define NFI_DEBUG_CON1          (0x220)
#define         STROBE_MASK             GENMASK(4, 3)
#define         STROBE_SHIFT            (3)
#define         MAX_STROBE_DLY          (3)
#define NFI_MASTER_STA          (0x224)
#define         MASTER_STA_MASK         (0x0FFF)
#define NFI_EMPTY_THRESH        (0x23C)

#define MTK_NAME                "mtk-nand"
#define KB(x)                   ((x) * 1024UL)
#define MB(x)                   (KB(x) * 1024UL)

#define MTK_TIMEOUT             (500000)
#define MTK_RESET_TIMEOUT       (1000000)
#define MTK_NAND_MAX_NSELS      (2)
#define MTK_NFC_MIN_SPARE       (16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
        ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
        (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))

struct mtk_nfc_caps {
        const u8 *spare_size;
        u8 num_spare_size;
        u8 pageformat_spare_shift;
        u8 nfi_clk_div;
        u8 max_sector;
        u32 max_sector_size;
};

struct mtk_nfc_bad_mark_ctl {
        void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
        u32 sec;
        u32 pos;
};

/*
 * FDM: region used to store free OOB data
 */
struct mtk_nfc_fdm {
        u32 reg_size;
        u32 ecc_size;
};

struct mtk_nfc_nand_chip {
        struct list_head node;
        struct nand_chip nand;

        struct mtk_nfc_bad_mark_ctl bad_mark;
        struct mtk_nfc_fdm fdm;
        u32 spare_per_sector;

        int nsels;
        u8 sels[];
        /* nothing after this field */
};

struct mtk_nfc_clk {
        struct clk *nfi_clk;
        struct clk *pad_clk;
};

struct mtk_nfc {
        struct nand_controller controller;
        struct mtk_ecc_config ecc_cfg;
        struct mtk_nfc_clk clk;
        struct mtk_ecc *ecc;

        struct device *dev;
        const struct mtk_nfc_caps *caps;
        void __iomem *regs;

        struct completion done;
        struct list_head chips;

        u8 *buffer;

        unsigned long assigned_cs;
};

/*
 * Supported spare sizes for each IP.
 * The order must match the spare-size bitfield definition of the
 * NFI_PAGEFMT register.
 */
static const u8 spare_size_mt2701[] = {
        16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
};

static const u8 spare_size_mt2712[] = {
        16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
        74
};

static const u8 spare_size_mt7622[] = {
        16, 26, 27, 28
};

static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
        return container_of(nand, struct mtk_nfc_nand_chip, nand);
}

static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
{
        return (u8 *)p + i * chip->ecc.size;
}

static inline u8 *oob_ptr(struct nand_chip *chip, int i)
{
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        u8 *poi;

        /*
         * map the sector's FDM data to free oob:
         * the beginning of the oob area stores the FDM data of bad mark sectors
         */
        if (i < mtk_nand->bad_mark.sec)
                poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
        else if (i == mtk_nand->bad_mark.sec)
                poi = chip->oob_poi;
        else
                poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;

        return poi;
}

static inline int mtk_data_len(struct nand_chip *chip)
{
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);

        return chip->ecc.size + mtk_nand->spare_per_sector;
}
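
/*
 * nfc->buffer holds the raw page image in the controller's sector-major
 * layout: each step is chip->ecc.size data bytes followed directly by that
 * sector's spare bytes. The two helpers below return the data and the
 * spare (OOB) portion of step i within that buffer.
 */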
static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);

        return nfc->buffer + i * mtk_data_len(chip);
}

static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);

        return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
}

static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
{
        writel(val, nfc->regs + reg);
}

static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
{
        writew(val, nfc->regs + reg);
}

static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
{
        writeb(val, nfc->regs + reg);
}

static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
{
        return readl_relaxed(nfc->regs + reg);
}

static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
{
        return readw_relaxed(nfc->regs + reg);
}

static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
{
        return readb_relaxed(nfc->regs + reg);
}

static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
{
        struct device *dev = nfc->dev;
        u32 val;
        int ret;

        /* reset all registers and force the NFI master to terminate */
        nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);

        /* wait for the master to finish the last transaction */
        ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
                                 !(val & MASTER_STA_MASK), 50,
                                 MTK_RESET_TIMEOUT);
        if (ret)
                dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
                         NFI_MASTER_STA, val);

        /* ensure any status register affected by the NFI master is reset */
        nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
        nfi_writew(nfc, STAR_DE, NFI_STRDATA);
}

static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
{
        struct device *dev = nfc->dev;
        u32 val;
        int ret;

        nfi_writel(nfc, command, NFI_CMD);

        ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
                                        !(val & STA_CMD), 10, MTK_TIMEOUT);
        if (ret) {
                dev_warn(dev, "nfi core timed out entering command mode\n");
                return -EIO;
        }

        return 0;
}

static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
{
        struct device *dev = nfc->dev;
        u32 val;
        int ret;

        nfi_writel(nfc, addr, NFI_COLADDR);
        nfi_writel(nfc, 0, NFI_ROWADDR);
        nfi_writew(nfc, 1, NFI_ADDRNOB);

        ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
                                        !(val & STA_ADDR), 10, MTK_TIMEOUT);
        if (ret) {
                dev_warn(dev, "nfi core timed out entering address mode\n");
                return -EIO;
        }

        return 0;
}

static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        u32 fmt, spare, i;

        if (!mtd->writesize)
                return 0;

        spare = mtk_nand->spare_per_sector;

        switch (mtd->writesize) {
        case 512:
                fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
                break;
        case KB(2):
                if (chip->ecc.size == 512)
                        fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
                else
                        fmt = PAGEFMT_512_2K;
                break;
        case KB(4):
                if (chip->ecc.size == 512)
                        fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
                else
                        fmt = PAGEFMT_2K_4K;
                break;
        case KB(8):
                if (chip->ecc.size == 512)
                        fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
                else
                        fmt = PAGEFMT_4K_8K;
                break;
        case KB(16):
                fmt = PAGEFMT_8K_16K;
                break;
        default:
                dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
                return -EINVAL;
        }

        /*
         * the hardware will double the value for this eccsize, so we need to
         * halve it
         */
        if (chip->ecc.size == 1024)
                spare >>= 1;

        for (i = 0; i < nfc->caps->num_spare_size; i++) {
                if (nfc->caps->spare_size[i] == spare)
                        break;
        }

        if (i == nfc->caps->num_spare_size) {
                dev_err(nfc->dev, "invalid spare size %d\n", spare);
                return -EINVAL;
        }

        fmt |= i << nfc->caps->pageformat_spare_shift;

        fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
        fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
        nfi_writel(nfc, fmt, NFI_PAGEFMT);

        nfc->ecc_cfg.strength = chip->ecc.strength;
        nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;

        return 0;
}

static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
{
        int rc;
        u8 val;

        rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
                                       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
        if (rc < 0)
                dev_err(nfc->dev, "data not ready\n");
}
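
/*
 * PIO read path: if the NFI state machine is not already in custom-data
 * mode, configure single-byte reads, program the maximum sector count so
 * the hardware keeps clocking data out across unaligned accesses, and
 * trigger the transfer. Each byte is then fetched from NFI_DATAR once the
 * PIO ready bit asserts.
 */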
static inline u8 mtk_nfc_read_byte(struct nand_chip *chip)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        u32 reg;

        /* after each byte read, the NFI_STA reg is reset by the hardware */
        reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
        if (reg != NFI_FSM_CUSTDATA) {
                reg = nfi_readw(nfc, NFI_CNFG);
                reg |= CNFG_BYTE_RW | CNFG_READ_EN;
                nfi_writew(nfc, reg, NFI_CNFG);

                /*
                 * set to max sector to allow the HW to continue reading over
                 * unaligned accesses
                 */
                reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
                nfi_writel(nfc, reg, NFI_CON);

                /* trigger to fetch data */
                nfi_writew(nfc, STAR_EN, NFI_STRDATA);
        }

        mtk_nfc_wait_ioready(nfc);

        return nfi_readb(nfc, NFI_DATAR);
}

static void mtk_nfc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
        int i;

        for (i = 0; i < len; i++)
                buf[i] = mtk_nfc_read_byte(chip);
}

static void mtk_nfc_write_byte(struct nand_chip *chip, u8 byte)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        u32 reg;

        reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;

        if (reg != NFI_FSM_CUSTDATA) {
                reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
                nfi_writew(nfc, reg, NFI_CNFG);

                reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
                nfi_writel(nfc, reg, NFI_CON);

                nfi_writew(nfc, STAR_EN, NFI_STRDATA);
        }

        mtk_nfc_wait_ioready(nfc);
        nfi_writeb(nfc, byte, NFI_DATAW);
}

static void mtk_nfc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
        int i;

        for (i = 0; i < len; i++)
                mtk_nfc_write_byte(chip, buf[i]);
}

static int mtk_nfc_exec_instr(struct nand_chip *chip,
                              const struct nand_op_instr *instr)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        unsigned int i;
        u32 status;

        switch (instr->type) {
        case NAND_OP_CMD_INSTR:
                mtk_nfc_send_command(nfc, instr->ctx.cmd.opcode);
                return 0;
        case NAND_OP_ADDR_INSTR:
                for (i = 0; i < instr->ctx.addr.naddrs; i++)
                        mtk_nfc_send_address(nfc, instr->ctx.addr.addrs[i]);
                return 0;
        case NAND_OP_DATA_IN_INSTR:
                mtk_nfc_read_buf(chip, instr->ctx.data.buf.in,
                                 instr->ctx.data.len);
                return 0;
        case NAND_OP_DATA_OUT_INSTR:
                mtk_nfc_write_buf(chip, instr->ctx.data.buf.out,
                                  instr->ctx.data.len);
                return 0;
        case NAND_OP_WAITRDY_INSTR:
                return readl_poll_timeout(nfc->regs + NFI_STA, status,
                                          !(status & STA_BUSY), 20,
                                          instr->ctx.waitrdy.timeout_ms * 1000);
        default:
                break;
        }

        return -EINVAL;
}

static void mtk_nfc_select_target(struct nand_chip *nand, unsigned int cs)
{
        struct mtk_nfc *nfc = nand_get_controller_data(nand);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);

        mtk_nfc_hw_runtime_config(nand_to_mtd(nand));

        nfi_writel(nfc, mtk_nand->sels[cs], NFI_CSEL);
}
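
/*
 * exec_op: reset the NFI to a clean state, switch it into custom operation
 * mode and replay the generic NAND instruction stream (command, address
 * cycles, data in/out, wait-ready) one instruction at a time.
 */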
static int mtk_nfc_exec_op(struct nand_chip *chip,
                           const struct nand_operation *op,
                           bool check_only)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        unsigned int i;
        int ret = 0;

        if (check_only)
                return 0;

        mtk_nfc_hw_reset(nfc);
        nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
        mtk_nfc_select_target(chip, op->cs);

        for (i = 0; i < op->ninstrs; i++) {
                ret = mtk_nfc_exec_instr(chip, &op->instrs[i]);
                if (ret)
                        break;
        }

        return ret;
}
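
/*
 * Translate the SDR timing requirements (in picoseconds) into NFI clock
 * cycles at the current nfi_clk rate and pack them into NFI_ACCCON; when
 * tREA exceeds the RE# pulse width, data sampling is delayed via the
 * strobe field in NFI_DEBUG_CON1, with any excess folded back into the RE#
 * low time. See the ACCON bit-layout comment near the end of this function.
 */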
static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline,
                                   const struct nand_interface_config *conf)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        const struct nand_sdr_timings *timings;
        u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
        u32 temp, tsel = 0;

        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
                return -ENOTSUPP;

        if (csline == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;

        rate = clk_get_rate(nfc->clk.nfi_clk);
        /* There is a frequency divider in some IPs */
        rate /= nfc->caps->nfi_clk_div;

        /* turn clock rate into kHz */
        rate /= 1000;

        tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
        tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
        tpoecs &= 0xf;

        tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
        tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
        tprecs &= 0x3f;

        /* sdr interface has no tCR which means CE# low to RE# low */
        tc2r = 0;

        tw2r = timings->tWHR_min / 1000;
        tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
        tw2r = DIV_ROUND_UP(tw2r - 1, 2);
        tw2r &= 0xf;

        twh = max(timings->tREH_min, timings->tWH_min) / 1000;
        twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
        twh &= 0xf;

        /* Calculate real WE#/RE# hold time in nanoseconds */
        temp = (twh + 1) * 1000000 / rate;
        /* nanoseconds to picoseconds */
        temp *= 1000;

        /*
         * WE# low level time should be expanded to meet WE# pulse time
         * and WE# cycle time at the same time.
         */
        if (temp < timings->tWC_min)
                twst = timings->tWC_min - temp;
        twst = max(timings->tWP_min, twst) / 1000;
        twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
        twst &= 0xf;

        /*
         * RE# low level time should be expanded to meet RE# pulse time
         * and RE# cycle time at the same time.
         */
        if (temp < timings->tRC_min)
                trlt = timings->tRC_min - temp;
        trlt = max(trlt, timings->tRP_min) / 1000;
        trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
        trlt &= 0xf;

        /* Calculate RE# pulse time in nanoseconds. */
        temp = (trlt + 1) * 1000000 / rate;
        /* nanoseconds to picoseconds */
        temp *= 1000;
        /*
         * If RE# access time is bigger than RE# pulse time,
         * delay sampling data timing.
         */
        if (temp < timings->tREA_max) {
                tsel = timings->tREA_max / 1000;
                tsel = DIV_ROUND_UP(tsel * rate, 1000000);
                tsel -= (trlt + 1);
                if (tsel > MAX_STROBE_DLY) {
                        trlt += tsel - MAX_STROBE_DLY;
                        tsel = MAX_STROBE_DLY;
                }
        }
        temp = nfi_readl(nfc, NFI_DEBUG_CON1);
        temp &= ~STROBE_MASK;
        temp |= tsel << STROBE_SHIFT;
        nfi_writel(nfc, temp, NFI_DEBUG_CON1);

        /*
         * ACCON: access timing control register
         * -------------------------------------
         * 31:28: tpoecs, minimum required time for CS post pulling down after
         *        accessing the device
         * 27:22: tprecs, minimum required time for CS pre pulling down before
         *        accessing the device
         * 21:16: tc2r, minimum required time from NCEB low to NREB low
         * 15:12: tw2r, minimum required time from NWEB high to NREB low.
         * 11:08: twh, write enable hold time
         * 07:04: twst, write wait states
         * 03:00: trlt, read wait states
         */
        trlt = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
        nfi_writel(nfc, trlt, NFI_ACCCON);

        return 0;
}

static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        int size = chip->ecc.size + mtk_nand->fdm.reg_size;

        nfc->ecc_cfg.mode = ECC_DMA_MODE;
        nfc->ecc_cfg.op = ECC_ENCODE;

        return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
}

static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
{
        /* nop */
}
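
/*
 * The factory bad block marker sits at a fixed physical offset in the
 * page, which in the controller's sector-major layout lands inside the
 * data area of one sector. Swap that byte with oob_poi[0] so the marker
 * remains visible in the user OOB while the sector data stays intact.
 */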
static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
        u32 bad_pos = nand->bad_mark.pos;

        if (raw)
                bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
        else
                bad_pos += nand->bad_mark.sec * chip->ecc.size;

        swap(chip->oob_poi[0], buf[bad_pos]);
}

static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
                                  u32 len, const u8 *buf)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 start, end;
        int i, ret;

        start = offset / chip->ecc.size;
        end = DIV_ROUND_UP(offset + len, chip->ecc.size);

        memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
        for (i = 0; i < chip->ecc.steps; i++) {
                memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
                       chip->ecc.size);

                if (start > i || i >= end)
                        continue;

                if (i == mtk_nand->bad_mark.sec)
                        mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

                memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);

                /* program the ECC parity data back into the OOB */
                ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
                if (ret < 0)
                        return ret;
        }

        return 0;
}

static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 i;

        memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
        for (i = 0; i < chip->ecc.steps; i++) {
                if (buf)
                        memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
                               chip->ecc.size);

                if (i == mtk_nand->bad_mark.sec)
                        mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

                memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
        }
}
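
/*
 * Each sector has one pair of FDM registers: NFI_FDML(i) holds free-OOB
 * bytes 0-3 and NFI_FDMM(i) bytes 4-7. The two helpers below shuttle up to
 * fdm->reg_size bytes per sector between these registers and oob_poi.
 */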
static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
                                    u32 sectors)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 vall, valm;
        u8 *oobptr;
        int i, j;

        for (i = 0; i < sectors; i++) {
                oobptr = oob_ptr(chip, start + i);
                vall = nfi_readl(nfc, NFI_FDML(i));
                valm = nfi_readl(nfc, NFI_FDMM(i));

                for (j = 0; j < fdm->reg_size; j++)
                        oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
        }
}

static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 vall, valm;
        u8 *oobptr;
        int i, j;

        for (i = 0; i < chip->ecc.steps; i++) {
                oobptr = oob_ptr(chip, i);
                vall = 0;
                valm = 0;
                for (j = 0; j < 8; j++) {
                        if (j < 4)
                                vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
                                        << (j * 8);
                        else
                                valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
                                        << ((j - 4) * 8);
                }
                nfi_writel(nfc, vall, NFI_FDML(i));
                nfi_writel(nfc, valm, NFI_FDMM(i));
        }
}
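
/*
 * DMA write path: map the source buffer, point NFI_STRADDR at it, arm the
 * AHB-done interrupt and start a burst write; completion is signalled from
 * the interrupt handler, after which NFI_ADDRCNTR is polled until all
 * sectors have been programmed.
 */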
static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                                 const u8 *buf, int page, int len)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct device *dev = nfc->dev;
        dma_addr_t addr;
        u32 reg;
        int ret;

        addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
        ret = dma_mapping_error(nfc->dev, addr);
        if (ret) {
                dev_err(nfc->dev, "dma mapping error\n");
                return -EINVAL;
        }

        reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
        nfi_writew(nfc, reg, NFI_CNFG);

        nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
        nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
        nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);

        init_completion(&nfc->done);

        reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
        nfi_writel(nfc, reg, NFI_CON);
        nfi_writew(nfc, STAR_EN, NFI_STRDATA);

        ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
        if (!ret) {
                dev_err(dev, "program ahb done timeout\n");
                nfi_writew(nfc, 0, NFI_INTR_EN);
                ret = -ETIMEDOUT;
                goto timeout;
        }

        ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
                                        ADDRCNTR_SEC(reg) >= chip->ecc.steps,
                                        10, MTK_TIMEOUT);
        if (ret)
                dev_err(dev, "hwecc write timeout\n");

timeout:
        dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
        nfi_writel(nfc, 0, NFI_CON);

        return ret;
}

static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                              const u8 *buf, int page, int raw)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        size_t len;
        const u8 *bufpoi;
        u32 reg;
        int ret;

        mtk_nfc_select_target(chip, chip->cur_cs);
        nand_prog_page_begin_op(chip, page, 0, NULL, 0);

        if (!raw) {
                /* OOB => FDM: from register, ECC: from HW */
                reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
                nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);

                nfc->ecc_cfg.op = ECC_ENCODE;
                nfc->ecc_cfg.mode = ECC_NFI_MODE;
                ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
                if (ret) {
                        /* clear NFI config */
                        reg = nfi_readw(nfc, NFI_CNFG);
                        reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
                        nfi_writew(nfc, reg, NFI_CNFG);

                        return ret;
                }

                memcpy(nfc->buffer, buf, mtd->writesize);
                mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
                bufpoi = nfc->buffer;

                /* write OOB into the FDM registers (OOB area in MTK NAND) */
                mtk_nfc_write_fdm(chip);
        } else {
                bufpoi = buf;
        }

        len = mtd->writesize + (raw ? mtd->oobsize : 0);
        ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);

        if (!raw)
                mtk_ecc_disable(nfc->ecc);

        if (ret)
                return ret;

        return nand_prog_page_end_op(chip);
}

static int mtk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
                                    int oob_on, int page)
{
        return mtk_nfc_write_page(nand_to_mtd(chip), chip, buf, page, 0);
}

static int mtk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
                                  int oob_on, int pg)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);

        mtk_nfc_format_page(mtd, buf);
        return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
}

static int mtk_nfc_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
                                       u32 data_len, const u8 *buf,
                                       int oob_on, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        int ret;

        ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
        if (ret < 0)
                return ret;

        /* use the data in the private buffer (now with FDM and ECC parity) */
        return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
}

static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
{
        return mtk_nfc_write_page_raw(chip, NULL, 1, page);
}
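
/*
 * If the controller flagged the page as empty (all 0xff), report clean
 * buffers instead of ECC failures; otherwise fold the ECC engine's
 * corrected/failed counters into the mtd stats and return the maximum
 * bitflip count.
 */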
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
                                    u32 sectors)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_ecc_stats stats;
        u32 reg_size = mtk_nand->fdm.reg_size;
        int rc, i;

        rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
        if (rc) {
                memset(buf, 0xff, sectors * chip->ecc.size);
                for (i = 0; i < sectors; i++)
                        memset(oob_ptr(chip, start + i), 0xff, reg_size);
                return 0;
        }

        mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
        mtd->ecc_stats.corrected += stats.corrected;
        mtd->ecc_stats.failed += stats.failed;

        return stats.bitflips;
}
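
/*
 * Core read path, shared by the page, subpage and raw reads: compute the
 * sector span covering [data_offs, data_offs + readlen), DMA it in, and in
 * ECC mode let the hardware correct the data while the FDM bytes are
 * collected from the per-sector FDM registers.
 */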
static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
                                u32 data_offs, u32 readlen,
                                u8 *bufpoi, int page, int raw)
{
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        u32 spare = mtk_nand->spare_per_sector;
        u32 column, sectors, start, end, reg;
        dma_addr_t addr;
        int bitflips = 0;
        size_t len;
        u8 *buf;
        int rc;

        mtk_nfc_select_target(chip, chip->cur_cs);
        start = data_offs / chip->ecc.size;
        end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);

        sectors = end - start;
        column = start * (chip->ecc.size + spare);

        len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
        buf = bufpoi + start * chip->ecc.size;

        nand_read_page_op(chip, page, column, NULL, 0);

        addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
        rc = dma_mapping_error(nfc->dev, addr);
        if (rc) {
                dev_err(nfc->dev, "dma mapping error\n");
                return -EINVAL;
        }

        reg = nfi_readw(nfc, NFI_CNFG);
        reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
        if (!raw) {
                reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
                nfi_writew(nfc, reg, NFI_CNFG);

                nfc->ecc_cfg.mode = ECC_NFI_MODE;
                nfc->ecc_cfg.sectors = sectors;
                nfc->ecc_cfg.op = ECC_DECODE;
                rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
                if (rc) {
                        dev_err(nfc->dev, "ecc enable\n");
                        /* clear NFI_CNFG */
                        reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
                                CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
                        nfi_writew(nfc, reg, NFI_CNFG);
                        dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

                        return rc;
                }
        } else {
                nfi_writew(nfc, reg, NFI_CNFG);
        }

        nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
        nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
        nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);

        init_completion(&nfc->done);
        reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
        nfi_writel(nfc, reg, NFI_CON);
        nfi_writew(nfc, STAR_EN, NFI_STRDATA);

        rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
        if (!rc)
                dev_warn(nfc->dev, "read ahb/dma done timeout\n");

        rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
                                       ADDRCNTR_SEC(reg) >= sectors, 10,
                                       MTK_TIMEOUT);
        if (rc < 0) {
                dev_err(nfc->dev, "subpage done timeout\n");
                bitflips = -EIO;
        } else if (!raw) {
                rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
                bitflips = rc < 0 ? -ETIMEDOUT :
                        mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
                mtk_nfc_read_fdm(chip, start, sectors);
        }

        dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

        if (raw)
                goto done;

        mtk_ecc_disable(nfc->ecc);

        if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
                mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
done:
        nfi_writel(nfc, 0, NFI_CON);

        return bitflips;
}

static int mtk_nfc_read_subpage_hwecc(struct nand_chip *chip, u32 off,
                                      u32 len, u8 *p, int pg)
{
        return mtk_nfc_read_subpage(nand_to_mtd(chip), chip, off, len, p, pg,
                                    0);
}

static int mtk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *p, int oob_on,
                                   int pg)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
}

static int mtk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
                                 int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        int i, ret;

        memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
        ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
                                   page, 1);
        if (ret < 0)
                return ret;

        for (i = 0; i < chip->ecc.steps; i++) {
                memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);

                if (i == mtk_nand->bad_mark.sec)
                        mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

                if (buf)
                        memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
                               chip->ecc.size);
        }

        return ret;
}

static int mtk_nfc_read_oob_std(struct nand_chip *chip, int page)
{
        return mtk_nfc_read_page_raw(chip, NULL, 1, page);
}

static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
{
        /*
         * CNRNB: nand ready/busy register
         * -------------------------------
         * 7:4: timeout register for polling the NAND busy/ready signal
         * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
         */
        nfi_writew(nfc, 0xf1, NFI_CNRNB);
        nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);

        mtk_nfc_hw_reset(nfc);

        nfi_readl(nfc, NFI_INTR_STA);
        nfi_writel(nfc, 0, NFI_INTR_EN);
}
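
/*
 * The interrupt handler acknowledges by disabling the status bits that
 * fired (interrupts are re-armed per transfer) and completes nfc->done,
 * which the DMA read/write paths block on.
 */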
static irqreturn_t mtk_nfc_irq(int irq, void *id)
{
        struct mtk_nfc *nfc = id;
        u16 sta, ien;

        sta = nfi_readw(nfc, NFI_INTR_STA);
        ien = nfi_readw(nfc, NFI_INTR_EN);

        if (!(sta & ien))
                return IRQ_NONE;

        nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
        complete(&nfc->done);

        return IRQ_HANDLED;
}

static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
{
        int ret;

        ret = clk_prepare_enable(clk->nfi_clk);
        if (ret) {
                dev_err(dev, "failed to enable nfi clk\n");
                return ret;
        }

        ret = clk_prepare_enable(clk->pad_clk);
        if (ret) {
                dev_err(dev, "failed to enable pad clk\n");
                clk_disable_unprepare(clk->nfi_clk);
                return ret;
        }

        return 0;
}

static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
{
        clk_disable_unprepare(clk->nfi_clk);
        clk_disable_unprepare(clk->pad_clk);
}

static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oob_region)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
        u32 eccsteps;

        eccsteps = mtd->writesize / chip->ecc.size;

        if (section >= eccsteps)
                return -ERANGE;

        oob_region->length = fdm->reg_size - fdm->ecc_size;
        oob_region->offset = section * fdm->reg_size + fdm->ecc_size;

        return 0;
}

static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oob_region)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        u32 eccsteps;

        if (section)
                return -ERANGE;

        eccsteps = mtd->writesize / chip->ecc.size;
        oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
        oob_region->length = mtd->oobsize - oob_region->offset;

        return 0;
}

static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
        .free = mtk_nfc_ooblayout_free,
        .ecc = mtk_nfc_ooblayout_ecc,
};
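
/*
 * FDM sizing: whatever the spare area leaves after the ECC parity bytes
 * becomes free OOB, capped at the 8-byte FDM register width; one FDM byte
 * per sector is additionally ECC-protected for bad block mark storage.
 */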
static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
        struct mtk_nfc *nfc = nand_get_controller_data(nand);
        u32 ecc_bytes;

        ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
                                 mtk_ecc_get_parity_bits(nfc->ecc), 8);

        fdm->reg_size = chip->spare_per_sector - ecc_bytes;
        if (fdm->reg_size > NFI_FDM_MAX_SIZE)
                fdm->reg_size = NFI_FDM_MAX_SIZE;

        /* bad block mark storage */
        fdm->ecc_size = 1;
}

static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
                                     struct mtd_info *mtd)
{
        struct nand_chip *nand = mtd_to_nand(mtd);

        if (mtd->writesize == 512) {
                bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
        } else {
                bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
                bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
                bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
        }
}
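
/*
 * Pick the largest spare size supported by this IP that still fits the
 * chip's OOB-per-sector budget. For 1024-byte sectors the hardware doubles
 * the programmed value, hence the shift before and after the lookup.
 */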
static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(nand);
        const u8 *spare = nfc->caps->spare_size;
        u32 eccsteps, i, closest_spare = 0;

        eccsteps = mtd->writesize / nand->ecc.size;
        *sps = mtd->oobsize / eccsteps;

        if (nand->ecc.size == 1024)
                *sps >>= 1;

        if (*sps < MTK_NFC_MIN_SPARE)
                return -EINVAL;

        for (i = 0; i < nfc->caps->num_spare_size; i++) {
                if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
                        closest_spare = i;
                        if (*sps == spare[i])
                                break;
                }
        }

        *sps = spare[closest_spare];

        if (nand->ecc.size == 1024)
                *sps <<= 1;

        return 0;
}

static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        const struct nand_ecc_props *requirements =
                nanddev_get_ecc_requirements(&nand->base);
        struct mtk_nfc *nfc = nand_get_controller_data(nand);
        u32 spare;
        int free, ret;

        /* support only ecc hw mode */
        if (nand->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
                dev_err(dev, "ecc.engine_type not supported\n");
                return -EINVAL;
        }

        /* if optional dt settings not present */
        if (!nand->ecc.size || !nand->ecc.strength) {
                /* use datasheet requirements */
                nand->ecc.strength = requirements->strength;
                nand->ecc.size = requirements->step_size;

                /*
                 * align eccstrength and eccsize
                 * this controller only supports 512 and 1024 sizes
                 */
                if (nand->ecc.size < 1024) {
                        if (mtd->writesize > 512 &&
                            nfc->caps->max_sector_size > 512) {
                                nand->ecc.size = 1024;
                                nand->ecc.strength <<= 1;
                        } else {
                                nand->ecc.size = 512;
                        }
                } else {
                        nand->ecc.size = 1024;
                }

                ret = mtk_nfc_set_spare_per_sector(&spare, mtd);
                if (ret)
                        return ret;

                /* calculate oob bytes except ecc parity data */
                free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
                        + 7) >> 3;
                free = spare - free;

                /*
                 * enhance ecc strength if oob left is bigger than max FDM size
                 * or reduce ecc strength if oob size is not enough for ecc
                 * parity data.
                 */
                if (free > NFI_FDM_MAX_SIZE) {
                        spare -= NFI_FDM_MAX_SIZE;
                        nand->ecc.strength = (spare << 3) /
                                             mtk_ecc_get_parity_bits(nfc->ecc);
                } else if (free < 0) {
                        spare -= NFI_FDM_MIN_SIZE;
                        nand->ecc.strength = (spare << 3) /
                                             mtk_ecc_get_parity_bits(nfc->ecc);
                }
        }

        mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength);

        dev_info(dev, "eccsize %d eccstrength %d\n",
                 nand->ecc.size, nand->ecc.strength);

        return 0;
}

static int mtk_nfc_attach_chip(struct nand_chip *chip)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct device *dev = mtd->dev.parent;
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        int len;
        int ret;

        if (chip->options & NAND_BUSWIDTH_16) {
                dev_err(dev, "16bits buswidth not supported");
                return -EINVAL;
        }

        /* store bbt magic in page, because the OOB is not protected */
        if (chip->bbt_options & NAND_BBT_USE_FLASH)
                chip->bbt_options |= NAND_BBT_NO_OOB;

        ret = mtk_nfc_ecc_init(dev, mtd);
        if (ret)
                return ret;

        ret = mtk_nfc_set_spare_per_sector(&mtk_nand->spare_per_sector, mtd);
        if (ret)
                return ret;

        mtk_nfc_set_fdm(&mtk_nand->fdm, mtd);
        mtk_nfc_set_bad_mark_ctl(&mtk_nand->bad_mark, mtd);

        len = mtd->writesize + mtd->oobsize;
        nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
        if (!nfc->buffer)
                return -ENOMEM;

        return 0;
}

static const struct nand_controller_ops mtk_nfc_controller_ops = {
        .attach_chip = mtk_nfc_attach_chip,
        .setup_interface = mtk_nfc_setup_interface,
        .exec_op = mtk_nfc_exec_op,
};

static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
                                  struct device_node *np)
{
        struct mtk_nfc_nand_chip *chip;
        struct nand_chip *nand;
        struct mtd_info *mtd;
        int nsels;
        u32 tmp;
        int ret;
        int i;

        if (!of_get_property(np, "reg", &nsels))
                return -ENODEV;

        nsels /= sizeof(u32);
        if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
                dev_err(dev, "invalid reg property size %d\n", nsels);
                return -EINVAL;
        }

        chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
                            GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->nsels = nsels;
        for (i = 0; i < nsels; i++) {
                ret = of_property_read_u32_index(np, "reg", i, &tmp);
                if (ret) {
                        dev_err(dev, "reg property failure : %d\n", ret);
                        return ret;
                }

                if (tmp >= MTK_NAND_MAX_NSELS) {
                        dev_err(dev, "invalid CS: %u\n", tmp);
                        return -EINVAL;
                }

                if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
                        dev_err(dev, "CS %u already assigned\n", tmp);
                        return -EINVAL;
                }

                chip->sels[i] = tmp;
        }

        nand = &chip->nand;
        nand->controller = &nfc->controller;

        nand_set_flash_node(nand, np);
        nand_set_controller_data(nand, nfc);

        nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;

        /* set default mode in case dt entry is missing */
        nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

        nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
        nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
        nand->ecc.write_page = mtk_nfc_write_page_hwecc;
        nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
        nand->ecc.write_oob = mtk_nfc_write_oob_std;

        nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
        nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
        nand->ecc.read_page = mtk_nfc_read_page_hwecc;
        nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
        nand->ecc.read_oob = mtk_nfc_read_oob_std;

        mtd = nand_to_mtd(nand);
        mtd->owner = THIS_MODULE;
        mtd->dev.parent = dev;
        mtd->name = MTK_NAME;
        mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);

        mtk_nfc_hw_init(nfc);

        ret = nand_scan(nand, nsels);
        if (ret)
                return ret;

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(dev, "mtd parse partition error\n");
                nand_cleanup(nand);
                return ret;
        }

        list_add_tail(&chip->node, &nfc->chips);

        return 0;
}

static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
        struct device_node *np = dev->of_node;
        struct device_node *nand_np;
        int ret;

        for_each_child_of_node(np, nand_np) {
                ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
                if (ret) {
                        of_node_put(nand_np);
                        return ret;
                }
        }

        return 0;
}

static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
        .spare_size = spare_size_mt2701,
        .num_spare_size = 16,
        .pageformat_spare_shift = 4,
        .nfi_clk_div = 1,
        .max_sector = 16,
        .max_sector_size = 1024,
};

static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
        .spare_size = spare_size_mt2712,
        .num_spare_size = 19,
        .pageformat_spare_shift = 16,
        .nfi_clk_div = 2,
        .max_sector = 16,
        .max_sector_size = 1024,
};

static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
        .spare_size = spare_size_mt7622,
        .num_spare_size = 4,
        .pageformat_spare_shift = 4,
        .nfi_clk_div = 1,
        .max_sector = 8,
        .max_sector_size = 512,
};

static const struct of_device_id mtk_nfc_id_table[] = {
        {
                .compatible = "mediatek,mt2701-nfc",
                .data = &mtk_nfc_caps_mt2701,
        }, {
                .compatible = "mediatek,mt2712-nfc",
                .data = &mtk_nfc_caps_mt2712,
        }, {
                .compatible = "mediatek,mt7622-nfc",
                .data = &mtk_nfc_caps_mt7622,
        },
        {}
};
MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);

static int mtk_nfc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct mtk_nfc *nfc;
        int ret, irq;

        nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
        if (!nfc)
                return -ENOMEM;

        nand_controller_init(&nfc->controller);
        INIT_LIST_HEAD(&nfc->chips);
        nfc->controller.ops = &mtk_nfc_controller_ops;

        /* probe defer if not ready */
        nfc->ecc = of_mtk_ecc_get(np);
        if (IS_ERR(nfc->ecc))
                return PTR_ERR(nfc->ecc);
        else if (!nfc->ecc)
                return -ENODEV;

        nfc->caps = of_device_get_match_data(dev);
        nfc->dev = dev;

        nfc->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(nfc->regs)) {
                ret = PTR_ERR(nfc->regs);
                goto release_ecc;
        }

        nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
        if (IS_ERR(nfc->clk.nfi_clk)) {
                dev_err(dev, "no clk\n");
                ret = PTR_ERR(nfc->clk.nfi_clk);
                goto release_ecc;
        }

        nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
        if (IS_ERR(nfc->clk.pad_clk)) {
                dev_err(dev, "no pad clk\n");
                ret = PTR_ERR(nfc->clk.pad_clk);
                goto release_ecc;
        }

        ret = mtk_nfc_enable_clk(dev, &nfc->clk);
        if (ret)
                goto release_ecc;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = -EINVAL;
                goto clk_disable;
        }

        ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
        if (ret) {
                dev_err(dev, "failed to request nfi irq\n");
                goto clk_disable;
        }

        ret = dma_set_mask(dev, DMA_BIT_MASK(32));
        if (ret) {
                dev_err(dev, "failed to set dma mask\n");
                goto clk_disable;
        }

        platform_set_drvdata(pdev, nfc);

        ret = mtk_nfc_nand_chips_init(dev, nfc);
        if (ret) {
                dev_err(dev, "failed to init nand chips\n");
                goto clk_disable;
        }

        return 0;

clk_disable:
        mtk_nfc_disable_clk(&nfc->clk);

release_ecc:
        mtk_ecc_release(nfc->ecc);

        return ret;
}

static int mtk_nfc_remove(struct platform_device *pdev)
{
        struct mtk_nfc *nfc = platform_get_drvdata(pdev);
        struct mtk_nfc_nand_chip *mtk_chip;
        struct nand_chip *chip;
        int ret;

        while (!list_empty(&nfc->chips)) {
                mtk_chip = list_first_entry(&nfc->chips,
                                            struct mtk_nfc_nand_chip, node);
                chip = &mtk_chip->nand;
                ret = mtd_device_unregister(nand_to_mtd(chip));
                WARN_ON(ret);
                nand_cleanup(chip);
                list_del(&mtk_chip->node);
        }

        mtk_ecc_release(nfc->ecc);
        mtk_nfc_disable_clk(&nfc->clk);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_nfc_suspend(struct device *dev)
{
        struct mtk_nfc *nfc = dev_get_drvdata(dev);

        mtk_nfc_disable_clk(&nfc->clk);

        return 0;
}

static int mtk_nfc_resume(struct device *dev)
{
        struct mtk_nfc *nfc = dev_get_drvdata(dev);
        struct mtk_nfc_nand_chip *chip;
        struct nand_chip *nand;
        int ret;
        u32 i;

        udelay(200);

        ret = mtk_nfc_enable_clk(dev, &nfc->clk);
        if (ret)
                return ret;

        /* reset NAND chip if VCC was powered off */
        list_for_each_entry(chip, &nfc->chips, node) {
                nand = &chip->nand;
                for (i = 0; i < chip->nsels; i++)
                        nand_reset(nand, i);
        }

        return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
#endif

static struct platform_driver mtk_nfc_driver = {
        .probe = mtk_nfc_probe,
        .remove = mtk_nfc_remove,
        .driver = {
                .name = MTK_NAME,
                .of_match_table = mtk_nfc_id_table,
#ifdef CONFIG_PM_SLEEP
                .pm = &mtk_nfc_pm_ops,
#endif
        },
};

module_platform_driver(mtk_nfc_driver);

MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Xiaolei Li <[email protected]>");
MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");