/* ecc.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Generic Error-Correcting Code (ECC) engine
  4. *
  5. * Copyright (C) 2019 Macronix
  6. * Author:
  7. * Miquèl RAYNAL <[email protected]>
  8. *
  9. *
  10. * This file describes the abstraction of any NAND ECC engine. It has been
  11. * designed to fit most cases, including parallel NANDs and SPI-NANDs.
  12. *
  13. * There are three main situations where instantiating this ECC engine makes
  14. * sense:
  15. * - external: The ECC engine is outside the NAND pipeline, typically this
* is a software ECC engine, or a hardware engine that is
  17. * outside the NAND controller pipeline.
  18. * - pipelined: The ECC engine is inside the NAND pipeline, ie. on the
  19. * controller's side. This is the case of most of the raw NAND
  20. * controllers. In the pipeline case, the ECC bytes are
  21. * generated/data corrected on the fly when a page is
  22. * written/read.
  23. * - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
  24. * Some NAND chips can correct themselves the data.
  25. *
  26. * Besides the initial setup and final cleanups, the interfaces are rather
  27. * simple:
  28. * - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
  29. * the I/O request type. In case of software correction or external
  30. * engine, this step may involve to derive the ECC bytes and place
  31. * them in the OOB area before a write.
  32. * - finish: Finish an I/O request. Correct the data in case of a read
  33. * request and report the number of corrected bits/uncorrectable
  34. * errors. Most likely empty for write operations, unless you have
  35. * hardware specific stuff to do, like shutting down the engine to
  36. * save power.
  37. *
  38. * The I/O request should be enclosed in a prepare()/finish() pair of calls
  39. * and will behave differently depending on the requested I/O type:
  40. * - raw: Correction disabled
  41. * - ecc: Correction enabled
  42. *
  43. * The request direction is impacting the logic as well:
  44. * - read: Load data from the NAND chip
  45. * - write: Store data in the NAND chip
  46. *
* Mixing all these combinations together gives the following behavior.
  48. * Those are just examples, drivers are free to add custom steps in their
  49. * prepare/finish hook.
  50. *
  51. * [external ECC engine]
  52. * - external + prepare + raw + read: do nothing
  53. * - external + finish + raw + read: do nothing
  54. * - external + prepare + raw + write: do nothing
  55. * - external + finish + raw + write: do nothing
  56. * - external + prepare + ecc + read: do nothing
  57. * - external + finish + ecc + read: calculate expected ECC bytes, extract
  58. * ECC bytes from OOB buffer, correct
  59. * and report any bitflip/error
  60. * - external + prepare + ecc + write: calculate ECC bytes and store them at
  61. * the right place in the OOB buffer based
  62. * on the OOB layout
  63. * - external + finish + ecc + write: do nothing
  64. *
  65. * [pipelined ECC engine]
  66. * - pipelined + prepare + raw + read: disable the controller's ECC engine if
  67. * activated
  68. * - pipelined + finish + raw + read: do nothing
  69. * - pipelined + prepare + raw + write: disable the controller's ECC engine if
  70. * activated
  71. * - pipelined + finish + raw + write: do nothing
  72. * - pipelined + prepare + ecc + read: enable the controller's ECC engine if
  73. * deactivated
  74. * - pipelined + finish + ecc + read: check the status, report any
  75. * error/bitflip
  76. * - pipelined + prepare + ecc + write: enable the controller's ECC engine if
  77. * deactivated
  78. * - pipelined + finish + ecc + write: do nothing
  79. *
  80. * [ondie ECC engine]
  81. * - ondie + prepare + raw + read: send commands to disable the on-chip ECC
  82. * engine if activated
  83. * - ondie + finish + raw + read: do nothing
  84. * - ondie + prepare + raw + write: send commands to disable the on-chip ECC
  85. * engine if activated
  86. * - ondie + finish + raw + write: do nothing
  87. * - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
  88. * engine if deactivated
  89. * - ondie + finish + ecc + read: send commands to check the status, report
  90. * any error/bitflip
  91. * - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
  92. * engine if deactivated
  93. * - ondie + finish + ecc + write: do nothing
  94. */
  95. #include <linux/module.h>
  96. #include <linux/mtd/nand.h>
  97. #include <linux/slab.h>
  98. #include <linux/of.h>
  99. #include <linux/of_device.h>
  100. #include <linux/of_platform.h>
/* Registry of on-host hardware ECC engines, protected by the mutex below */
static LIST_HEAD(on_host_hw_engines);
static DEFINE_MUTEX(on_host_hw_engines_mutex);
  103. /**
  104. * nand_ecc_init_ctx - Init the ECC engine context
  105. * @nand: the NAND device
  106. *
  107. * On success, the caller is responsible of calling @nand_ecc_cleanup_ctx().
  108. */
  109. int nand_ecc_init_ctx(struct nand_device *nand)
  110. {
  111. if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx)
  112. return 0;
  113. return nand->ecc.engine->ops->init_ctx(nand);
  114. }
  115. EXPORT_SYMBOL(nand_ecc_init_ctx);
  116. /**
  117. * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
  118. * @nand: the NAND device
  119. */
  120. void nand_ecc_cleanup_ctx(struct nand_device *nand)
  121. {
  122. if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx)
  123. nand->ecc.engine->ops->cleanup_ctx(nand);
  124. }
  125. EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
  126. /**
  127. * nand_ecc_prepare_io_req - Prepare an I/O request
  128. * @nand: the NAND device
  129. * @req: the I/O request
  130. */
  131. int nand_ecc_prepare_io_req(struct nand_device *nand,
  132. struct nand_page_io_req *req)
  133. {
  134. if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req)
  135. return 0;
  136. return nand->ecc.engine->ops->prepare_io_req(nand, req);
  137. }
  138. EXPORT_SYMBOL(nand_ecc_prepare_io_req);
  139. /**
  140. * nand_ecc_finish_io_req - Finish an I/O request
  141. * @nand: the NAND device
  142. * @req: the I/O request
  143. */
  144. int nand_ecc_finish_io_req(struct nand_device *nand,
  145. struct nand_page_io_req *req)
  146. {
  147. if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req)
  148. return 0;
  149. return nand->ecc.engine->ops->finish_io_req(nand, req);
  150. }
  151. EXPORT_SYMBOL(nand_ecc_finish_io_req);
  152. /* Define default OOB placement schemes for large and small page devices */
  153. static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
  154. struct mtd_oob_region *oobregion)
  155. {
  156. struct nand_device *nand = mtd_to_nanddev(mtd);
  157. unsigned int total_ecc_bytes = nand->ecc.ctx.total;
  158. if (section > 1)
  159. return -ERANGE;
  160. if (!section) {
  161. oobregion->offset = 0;
  162. if (mtd->oobsize == 16)
  163. oobregion->length = 4;
  164. else
  165. oobregion->length = 3;
  166. } else {
  167. if (mtd->oobsize == 8)
  168. return -ERANGE;
  169. oobregion->offset = 6;
  170. oobregion->length = total_ecc_bytes - 4;
  171. }
  172. return 0;
  173. }
  174. static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
  175. struct mtd_oob_region *oobregion)
  176. {
  177. if (section > 1)
  178. return -ERANGE;
  179. if (mtd->oobsize == 16) {
  180. if (section)
  181. return -ERANGE;
  182. oobregion->length = 8;
  183. oobregion->offset = 8;
  184. } else {
  185. oobregion->length = 2;
  186. if (!section)
  187. oobregion->offset = 3;
  188. else
  189. oobregion->offset = 6;
  190. }
  191. return 0;
  192. }
/* Default OOB layout callbacks for small-page devices */
static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};

/* Accessor returning the default small-page OOB layout operations */
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
{
	return &nand_ooblayout_sp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
  202. static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
  203. struct mtd_oob_region *oobregion)
  204. {
  205. struct nand_device *nand = mtd_to_nanddev(mtd);
  206. unsigned int total_ecc_bytes = nand->ecc.ctx.total;
  207. if (section || !total_ecc_bytes)
  208. return -ERANGE;
  209. oobregion->length = total_ecc_bytes;
  210. oobregion->offset = mtd->oobsize - oobregion->length;
  211. return 0;
  212. }
static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int total_ecc_bytes = nand->ecc.ctx.total;

	if (section)
		return -ERANGE;

	/*
	 * Free bytes span from byte 2 (the first two bytes are reserved,
	 * presumably for the bad-block marker — TODO confirm) up to the ECC
	 * bytes placed at the end of the OOB area.
	 * NOTE(review): underflows if total_ecc_bytes + 2 > oobsize; assumes
	 * the ECC configuration was validated to fit beforehand — confirm.
	 */
	oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
	oobregion->offset = 2;

	return 0;
}
/* Default OOB layout callbacks for large-page devices */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};

/* Accessor returning the default large-page OOB layout operations */
const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
{
	return &nand_ooblayout_lp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
  233. /*
  234. * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
  235. * are placed at a fixed offset.
  236. */
  237. static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
  238. struct mtd_oob_region *oobregion)
  239. {
  240. struct nand_device *nand = mtd_to_nanddev(mtd);
  241. unsigned int total_ecc_bytes = nand->ecc.ctx.total;
  242. if (section)
  243. return -ERANGE;
  244. switch (mtd->oobsize) {
  245. case 64:
  246. oobregion->offset = 40;
  247. break;
  248. case 128:
  249. oobregion->offset = 80;
  250. break;
  251. default:
  252. return -EINVAL;
  253. }
  254. oobregion->length = total_ecc_bytes;
  255. if (oobregion->offset + oobregion->length > mtd->oobsize)
  256. return -ERANGE;
  257. return 0;
  258. }
  259. static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
  260. struct mtd_oob_region *oobregion)
  261. {
  262. struct nand_device *nand = mtd_to_nanddev(mtd);
  263. unsigned int total_ecc_bytes = nand->ecc.ctx.total;
  264. int ecc_offset = 0;
  265. if (section < 0 || section > 1)
  266. return -ERANGE;
  267. switch (mtd->oobsize) {
  268. case 64:
  269. ecc_offset = 40;
  270. break;
  271. case 128:
  272. ecc_offset = 80;
  273. break;
  274. default:
  275. return -EINVAL;
  276. }
  277. if (section == 0) {
  278. oobregion->offset = 2;
  279. oobregion->length = ecc_offset - 2;
  280. } else {
  281. oobregion->offset = ecc_offset + total_ecc_bytes;
  282. oobregion->length = mtd->oobsize - oobregion->offset;
  283. }
  284. return 0;
  285. }
/* Legacy large-page layout callbacks for 1-bit Hamming ECC */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};

/* Accessor returning the legacy large-page Hamming OOB layout operations */
const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
{
	return &nand_ooblayout_lp_hamming_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);
/*
 * Derive the ECC engine type from the device tree node: explicit "no engine"
 * and "soft engine" booleans are checked first, then the "nand-ecc-engine"
 * phandle selects between on-die (self reference) and on-host engines.
 */
static enum nand_ecc_engine_type
of_get_nand_ecc_engine_type(struct device_node *np)
{
	struct device_node *eng_np;

	if (of_property_read_bool(np, "nand-no-ecc-engine"))
		return NAND_ECC_ENGINE_TYPE_NONE;

	if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
		return NAND_ECC_ENGINE_TYPE_SOFT;

	eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
	/*
	 * The reference can be dropped right away: only the pointer value is
	 * compared below, the node is never dereferenced.
	 */
	of_node_put(eng_np);

	if (eng_np) {
		/* A phandle pointing back at the node itself means on-die */
		if (eng_np == np)
			return NAND_ECC_ENGINE_TYPE_ON_DIE;
		else
			return NAND_ECC_ENGINE_TYPE_ON_HOST;
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}
  313. static const char * const nand_ecc_placement[] = {
  314. [NAND_ECC_PLACEMENT_OOB] = "oob",
  315. [NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
  316. };
  317. static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
  318. {
  319. enum nand_ecc_placement placement;
  320. const char *pm;
  321. int err;
  322. err = of_property_read_string(np, "nand-ecc-placement", &pm);
  323. if (!err) {
  324. for (placement = NAND_ECC_PLACEMENT_OOB;
  325. placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
  326. if (!strcasecmp(pm, nand_ecc_placement[placement]))
  327. return placement;
  328. }
  329. }
  330. return NAND_ECC_PLACEMENT_UNKNOWN;
  331. }
  332. static const char * const nand_ecc_algos[] = {
  333. [NAND_ECC_ALGO_HAMMING] = "hamming",
  334. [NAND_ECC_ALGO_BCH] = "bch",
  335. [NAND_ECC_ALGO_RS] = "rs",
  336. };
  337. static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
  338. {
  339. enum nand_ecc_algo ecc_algo;
  340. const char *pm;
  341. int err;
  342. err = of_property_read_string(np, "nand-ecc-algo", &pm);
  343. if (!err) {
  344. for (ecc_algo = NAND_ECC_ALGO_HAMMING;
  345. ecc_algo < ARRAY_SIZE(nand_ecc_algos);
  346. ecc_algo++) {
  347. if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
  348. return ecc_algo;
  349. }
  350. }
  351. return NAND_ECC_ALGO_UNKNOWN;
  352. }
  353. static int of_get_nand_ecc_step_size(struct device_node *np)
  354. {
  355. int ret;
  356. u32 val;
  357. ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
  358. return ret ? ret : val;
  359. }
  360. static int of_get_nand_ecc_strength(struct device_node *np)
  361. {
  362. int ret;
  363. u32 val;
  364. ret = of_property_read_u32(np, "nand-ecc-strength", &val);
  365. return ret ? ret : val;
  366. }
/**
 * of_get_nand_ecc_user_config - Extract the user ECC configuration from the DT
 * @nand: the NAND device
 *
 * Fills @nand->ecc.user_conf with the engine type, algorithm, placement,
 * strength, step size and flags described in the device tree node.
 * Strength and step size are only overwritten when the corresponding
 * property was read successfully (non-negative return).
 */
void of_get_nand_ecc_user_config(struct nand_device *nand)
{
	struct device_node *dn = nanddev_get_of_node(nand);
	int strength, size;

	nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
	nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
	nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);

	strength = of_get_nand_ecc_strength(dn);
	if (strength >= 0)
		nand->ecc.user_conf.strength = strength;

	size = of_get_nand_ecc_step_size(dn);
	if (size >= 0)
		nand->ecc.user_conf.step_size = size;

	if (of_property_read_bool(dn, "nand-ecc-maximize"))
		nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
}
EXPORT_SYMBOL(of_get_nand_ecc_user_config);
/**
 * nand_ecc_is_strong_enough - Check if the chip configuration meets the
 *                             datasheet requirements.
 *
 * @nand: Device to check
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
bool nand_ecc_is_strong_enough(struct nand_device *nand)
{
	const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
	const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int corr, ds_corr;

	if (conf->step_size == 0 || reqs->step_size == 0)
		/* Not enough information */
		return true;

	/*
	 * We get the number of corrected bits per page to compare
	 * the correction density.
	 */
	corr = (mtd->writesize * conf->strength) / conf->step_size;
	ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;

	/* corr >= ds_corr is requirement (1), the strength check is (2) */
	return corr >= ds_corr && conf->strength >= reqs->strength;
}
EXPORT_SYMBOL(nand_ecc_is_strong_enough);
  419. /* ECC engine driver internal helpers */
  420. int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
  421. struct nand_device *nand)
  422. {
  423. unsigned int total_buffer_size;
  424. ctx->nand = nand;
  425. /* Let the user decide the exact length of each buffer */
  426. if (!ctx->page_buffer_size)
  427. ctx->page_buffer_size = nanddev_page_size(nand);
  428. if (!ctx->oob_buffer_size)
  429. ctx->oob_buffer_size = nanddev_per_page_oobsize(nand);
  430. total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size;
  431. ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL);
  432. if (!ctx->spare_databuf)
  433. return -ENOMEM;
  434. ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size;
  435. return 0;
  436. }
  437. EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking);
  438. void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx)
  439. {
  440. kfree(ctx->spare_databuf);
  441. }
  442. EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking);
/*
 * Ensure data and OOB area is fully read/written otherwise the correction might
 * not work as expected.
 */
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
			struct nand_page_io_req *req)
{
	struct nand_device *nand = ctx->nand;
	struct nand_page_io_req *orig, *tweak;

	/* Save the original request */
	ctx->orig_req = *req;
	ctx->bounce_data = false;
	ctx->bounce_oob = false;
	orig = &ctx->orig_req;
	tweak = req;

	/* Ensure the request covers the entire page */
	if (orig->datalen < nanddev_page_size(nand)) {
		ctx->bounce_data = true;
		tweak->dataoffs = 0;
		tweak->datalen = nanddev_page_size(nand);
		/* Redirect to the 0xFF pre-filled data bounce buffer */
		tweak->databuf.in = ctx->spare_databuf;
		memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size);
	}

	/* Same logic for the OOB area */
	if (orig->ooblen < nanddev_per_page_oobsize(nand)) {
		ctx->bounce_oob = true;
		tweak->ooboffs = 0;
		tweak->ooblen = nanddev_per_page_oobsize(nand);
		tweak->oobbuf.in = ctx->spare_oobbuf;
		memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
	}

	/* Copy the data that must be written in the bounce buffers, if needed */
	if (orig->type == NAND_PAGE_WRITE) {
		if (ctx->bounce_data)
			memcpy((void *)tweak->databuf.out + orig->dataoffs,
			       orig->databuf.out, orig->datalen);

		if (ctx->bounce_oob)
			memcpy((void *)tweak->oobbuf.out + orig->ooboffs,
			       orig->oobbuf.out, orig->ooblen);
	}
}
EXPORT_SYMBOL_GPL(nand_ecc_tweak_req);
/*
 * Undo nand_ecc_tweak_req(): copy any data read into the bounce buffers back
 * into the caller's buffers and restore the original request descriptor.
 */
void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
			  struct nand_page_io_req *req)
{
	struct nand_page_io_req *orig, *tweak;

	orig = &ctx->orig_req;
	tweak = req;

	/* Restore the data read from the bounce buffers, if needed */
	if (orig->type == NAND_PAGE_READ) {
		if (ctx->bounce_data)
			memcpy(orig->databuf.in,
			       tweak->databuf.in + orig->dataoffs,
			       orig->datalen);

		if (ctx->bounce_oob)
			memcpy(orig->oobbuf.in,
			       tweak->oobbuf.in + orig->ooboffs,
			       orig->ooblen);
	}

	/* Ensure the original request is restored */
	*req = *orig;
}
EXPORT_SYMBOL_GPL(nand_ecc_restore_req);
  505. struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand)
  506. {
  507. unsigned int algo = nand->ecc.user_conf.algo;
  508. if (algo == NAND_ECC_ALGO_UNKNOWN)
  509. algo = nand->ecc.defaults.algo;
  510. switch (algo) {
  511. case NAND_ECC_ALGO_HAMMING:
  512. return nand_ecc_sw_hamming_get_engine();
  513. case NAND_ECC_ALGO_BCH:
  514. return nand_ecc_sw_bch_get_engine();
  515. default:
  516. break;
  517. }
  518. return NULL;
  519. }
  520. EXPORT_SYMBOL(nand_ecc_get_sw_engine);
  521. struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
  522. {
  523. return nand->ecc.ondie_engine;
  524. }
  525. EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);
  526. int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
  527. {
  528. struct nand_ecc_engine *item;
  529. if (!engine)
  530. return -EINVAL;
  531. /* Prevent multiple registrations of one engine */
  532. list_for_each_entry(item, &on_host_hw_engines, node)
  533. if (item == engine)
  534. return 0;
  535. mutex_lock(&on_host_hw_engines_mutex);
  536. list_add_tail(&engine->node, &on_host_hw_engines);
  537. mutex_unlock(&on_host_hw_engines_mutex);
  538. return 0;
  539. }
  540. EXPORT_SYMBOL(nand_ecc_register_on_host_hw_engine);
/**
 * nand_ecc_unregister_on_host_hw_engine - Remove an engine from the registry
 * @engine: the ECC engine to unregister
 *
 * Returns 0 on success, -EINVAL when @engine is NULL.
 */
int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
{
	if (!engine)
		return -EINVAL;

	/* Serialize against concurrent (un)registrations */
	mutex_lock(&on_host_hw_engines_mutex);
	list_del(&engine->node);
	mutex_unlock(&on_host_hw_engines_mutex);

	return 0;
}
EXPORT_SYMBOL(nand_ecc_unregister_on_host_hw_engine);
/*
 * Look up a registered on-host engine by its backing device.
 * NOTE(review): walks the list without the mutex — presumably safe on the
 * callers' paths, but confirm against concurrent unregistration.
 */
static struct nand_ecc_engine *nand_ecc_match_on_host_hw_engine(struct device *dev)
{
	struct nand_ecc_engine *item;

	list_for_each_entry(item, &on_host_hw_engines, node)
		if (item->dev == dev)
			return item;

	return NULL;
}
  559. struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand)
  560. {
  561. struct nand_ecc_engine *engine = NULL;
  562. struct device *dev = &nand->mtd.dev;
  563. struct platform_device *pdev;
  564. struct device_node *np;
  565. if (list_empty(&on_host_hw_engines))
  566. return NULL;
  567. /* Check for an explicit nand-ecc-engine property */
  568. np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0);
  569. if (np) {
  570. pdev = of_find_device_by_node(np);
  571. if (!pdev)
  572. return ERR_PTR(-EPROBE_DEFER);
  573. engine = nand_ecc_match_on_host_hw_engine(&pdev->dev);
  574. platform_device_put(pdev);
  575. of_node_put(np);
  576. if (!engine)
  577. return ERR_PTR(-EPROBE_DEFER);
  578. }
  579. if (engine)
  580. get_device(engine->dev);
  581. return engine;
  582. }
  583. EXPORT_SYMBOL(nand_ecc_get_on_host_hw_engine);
  584. void nand_ecc_put_on_host_hw_engine(struct nand_device *nand)
  585. {
  586. put_device(nand->ecc.engine->dev);
  587. }
  588. EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine);
/*
 * In the case of a pipelined engine, the device registering the ECC
 * engine is not necessarily the ECC engine itself but may be a host controller.
 * It is then useful to provide a helper to retrieve the right device object
 * which actually represents the ECC engine.
 */
struct device *nand_ecc_get_engine_dev(struct device *host)
{
	struct platform_device *ecc_pdev;
	struct device_node *np;

	/*
	 * If the device node contains this property, it means we need to follow
	 * it in order to get the right ECC engine device we are looking for.
	 */
	np = of_parse_phandle(host->of_node, "nand-ecc-engine", 0);
	if (!np)
		return host;

	ecc_pdev = of_find_device_by_node(np);
	if (!ecc_pdev) {
		of_node_put(np);
		return NULL;
	}

	/*
	 * NOTE(review): the platform-device reference is dropped before its
	 * struct device is returned, so the returned pointer is not pinned by
	 * a reference here — presumably the engine outlives the caller;
	 * confirm against the callers' lifetimes.
	 */
	platform_device_put(ecc_pdev);
	of_node_put(np);

	return &ecc_pdev->dev;
}
  615. MODULE_LICENSE("GPL");
  616. MODULE_AUTHOR("Miquel Raynal <[email protected]>");
  617. MODULE_DESCRIPTION("Generic ECC engine");