/* nand.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright 2017 - Free Electrons
  4. *
  5. * Authors:
  6. * Boris Brezillon <[email protected]>
  7. * Peter Pan <[email protected]>
  8. */
  9. #ifndef __LINUX_MTD_NAND_H
  10. #define __LINUX_MTD_NAND_H
  11. #include <linux/mtd/mtd.h>
  12. struct nand_device;
  13. /**
  14. * struct nand_memory_organization - Memory organization structure
  15. * @bits_per_cell: number of bits per NAND cell
  16. * @pagesize: page size
  17. * @oobsize: OOB area size
  18. * @pages_per_eraseblock: number of pages per eraseblock
  19. * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
  20. * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN
  21. * @planes_per_lun: number of planes per LUN
  22. * @luns_per_target: number of LUN per target (target is a synonym for die)
  23. * @ntargets: total number of targets exposed by the NAND device
  24. */
  25. struct nand_memory_organization {
  26. unsigned int bits_per_cell;
  27. unsigned int pagesize;
  28. unsigned int oobsize;
  29. unsigned int pages_per_eraseblock;
  30. unsigned int eraseblocks_per_lun;
  31. unsigned int max_bad_eraseblocks_per_lun;
  32. unsigned int planes_per_lun;
  33. unsigned int luns_per_target;
  34. unsigned int ntargets;
  35. };
  36. #define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \
  37. { \
  38. .bits_per_cell = (bpc), \
  39. .pagesize = (ps), \
  40. .oobsize = (os), \
  41. .pages_per_eraseblock = (ppe), \
  42. .eraseblocks_per_lun = (epl), \
  43. .max_bad_eraseblocks_per_lun = (mbb), \
  44. .planes_per_lun = (ppl), \
  45. .luns_per_target = (lpt), \
  46. .ntargets = (nt), \
  47. }
  48. /**
  49. * struct nand_row_converter - Information needed to convert an absolute offset
  50. * into a row address
  51. * @lun_addr_shift: position of the LUN identifier in the row address
  52. * @eraseblock_addr_shift: position of the eraseblock identifier in the row
  53. * address
  54. */
  55. struct nand_row_converter {
  56. unsigned int lun_addr_shift;
  57. unsigned int eraseblock_addr_shift;
  58. };
  59. /**
  60. * struct nand_pos - NAND position object
  61. * @target: the NAND target/die
  62. * @lun: the LUN identifier
  63. * @plane: the plane within the LUN
  64. * @eraseblock: the eraseblock within the LUN
  65. * @page: the page within the LUN
  66. *
  67. * These information are usually used by specific sub-layers to select the
  68. * appropriate target/die and generate a row address to pass to the device.
  69. */
  70. struct nand_pos {
  71. unsigned int target;
  72. unsigned int lun;
  73. unsigned int plane;
  74. unsigned int eraseblock;
  75. unsigned int page;
  76. };
  77. /**
  78. * enum nand_page_io_req_type - Direction of an I/O request
  79. * @NAND_PAGE_READ: from the chip, to the controller
  80. * @NAND_PAGE_WRITE: from the controller, to the chip
  81. */
  82. enum nand_page_io_req_type {
  83. NAND_PAGE_READ = 0,
  84. NAND_PAGE_WRITE,
  85. };
  86. /**
  87. * struct nand_page_io_req - NAND I/O request object
  88. * @type: the type of page I/O: read or write
  89. * @pos: the position this I/O request is targeting
  90. * @dataoffs: the offset within the page
  91. * @datalen: number of data bytes to read from/write to this page
  92. * @databuf: buffer to store data in or get data from
  93. * @ooboffs: the OOB offset within the page
  94. * @ooblen: the number of OOB bytes to read from/write to this page
  95. * @oobbuf: buffer to store OOB data in or get OOB data from
  96. * @mode: one of the %MTD_OPS_XXX mode
  97. *
  98. * This object is used to pass per-page I/O requests to NAND sub-layers. This
  99. * way all useful information are already formatted in a useful way and
  100. * specific NAND layers can focus on translating these information into
  101. * specific commands/operations.
  102. */
  103. struct nand_page_io_req {
  104. enum nand_page_io_req_type type;
  105. struct nand_pos pos;
  106. unsigned int dataoffs;
  107. unsigned int datalen;
  108. union {
  109. const void *out;
  110. void *in;
  111. } databuf;
  112. unsigned int ooboffs;
  113. unsigned int ooblen;
  114. union {
  115. const void *out;
  116. void *in;
  117. } oobbuf;
  118. int mode;
  119. };
  120. const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
  121. const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
  122. const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);
  123. /**
  124. * enum nand_ecc_engine_type - NAND ECC engine type
  125. * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
  126. * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
  127. * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
  128. * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
  129. * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
  130. */
  131. enum nand_ecc_engine_type {
  132. NAND_ECC_ENGINE_TYPE_INVALID,
  133. NAND_ECC_ENGINE_TYPE_NONE,
  134. NAND_ECC_ENGINE_TYPE_SOFT,
  135. NAND_ECC_ENGINE_TYPE_ON_HOST,
  136. NAND_ECC_ENGINE_TYPE_ON_DIE,
  137. };
  138. /**
  139. * enum nand_ecc_placement - NAND ECC bytes placement
  140. * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
  141. * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
  142. * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
  143. * interleaved with regular data in the main
  144. * area
  145. */
  146. enum nand_ecc_placement {
  147. NAND_ECC_PLACEMENT_UNKNOWN,
  148. NAND_ECC_PLACEMENT_OOB,
  149. NAND_ECC_PLACEMENT_INTERLEAVED,
  150. };
  151. /**
  152. * enum nand_ecc_algo - NAND ECC algorithm
  153. * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
  154. * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
  155. * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
  156. * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
  157. */
  158. enum nand_ecc_algo {
  159. NAND_ECC_ALGO_UNKNOWN,
  160. NAND_ECC_ALGO_HAMMING,
  161. NAND_ECC_ALGO_BCH,
  162. NAND_ECC_ALGO_RS,
  163. };
  164. /**
  165. * struct nand_ecc_props - NAND ECC properties
  166. * @engine_type: ECC engine type
  167. * @placement: OOB placement (if relevant)
  168. * @algo: ECC algorithm (if relevant)
  169. * @strength: ECC strength
  170. * @step_size: Number of bytes per step
  171. * @flags: Misc properties
  172. */
  173. struct nand_ecc_props {
  174. enum nand_ecc_engine_type engine_type;
  175. enum nand_ecc_placement placement;
  176. enum nand_ecc_algo algo;
  177. unsigned int strength;
  178. unsigned int step_size;
  179. unsigned int flags;
  180. };
  181. #define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
  182. /* NAND ECC misc flags */
  183. #define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)
  184. /**
  185. * struct nand_bbt - bad block table object
  186. * @cache: in memory BBT cache
  187. */
  188. struct nand_bbt {
  189. unsigned long *cache;
  190. };
  191. /**
  192. * struct nand_ops - NAND operations
  193. * @erase: erase a specific block. No need to check if the block is bad before
  194. * erasing, this has been taken care of by the generic NAND layer
  195. * @markbad: mark a specific block bad. No need to check if the block is
  196. * already marked bad, this has been taken care of by the generic
  197. * NAND layer. This method should just write the BBM (Bad Block
  198. * Marker) so that future call to struct_nand_ops->isbad() return
  199. * true
  200. * @isbad: check whether a block is bad or not. This method should just read
  201. * the BBM and return whether the block is bad or not based on what it
  202. * reads
  203. *
  204. * These are all low level operations that should be implemented by specialized
  205. * NAND layers (SPI NAND, raw NAND, ...).
  206. */
  207. struct nand_ops {
  208. int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
  209. int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
  210. bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
  211. };
  212. /**
  213. * struct nand_ecc_context - Context for the ECC engine
  214. * @conf: basic ECC engine parameters
  215. * @nsteps: number of ECC steps
  216. * @total: total number of bytes used for storing ECC codes, this is used by
  217. * generic OOB layouts
  218. * @priv: ECC engine driver private data
  219. */
  220. struct nand_ecc_context {
  221. struct nand_ecc_props conf;
  222. unsigned int nsteps;
  223. unsigned int total;
  224. void *priv;
  225. };
  226. /**
  227. * struct nand_ecc_engine_ops - ECC engine operations
  228. * @init_ctx: given a desired user configuration for the pointed NAND device,
  229. * requests the ECC engine driver to setup a configuration with
  230. * values it supports.
  231. * @cleanup_ctx: clean the context initialized by @init_ctx.
  232. * @prepare_io_req: is called before reading/writing a page to prepare the I/O
  233. * request to be performed with ECC correction.
  234. * @finish_io_req: is called after reading/writing a page to terminate the I/O
  235. * request and ensure proper ECC correction.
  236. */
  237. struct nand_ecc_engine_ops {
  238. int (*init_ctx)(struct nand_device *nand);
  239. void (*cleanup_ctx)(struct nand_device *nand);
  240. int (*prepare_io_req)(struct nand_device *nand,
  241. struct nand_page_io_req *req);
  242. int (*finish_io_req)(struct nand_device *nand,
  243. struct nand_page_io_req *req);
  244. };
  245. /**
  246. * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
  247. * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
  248. * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
  249. * correction, does not need to copy
  250. * data around
  251. * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
  252. * data into its own area before use
  253. */
  254. enum nand_ecc_engine_integration {
  255. NAND_ECC_ENGINE_INTEGRATION_INVALID,
  256. NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
  257. NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
  258. };
  259. /**
  260. * struct nand_ecc_engine - ECC engine abstraction for NAND devices
  261. * @dev: Host device
  262. * @node: Private field for registration time
  263. * @ops: ECC engine operations
  264. * @integration: How the engine is integrated with the host
  265. * (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
  266. * @priv: Private data
  267. */
  268. struct nand_ecc_engine {
  269. struct device *dev;
  270. struct list_head node;
  271. struct nand_ecc_engine_ops *ops;
  272. enum nand_ecc_engine_integration integration;
  273. void *priv;
  274. };
  275. void of_get_nand_ecc_user_config(struct nand_device *nand);
  276. int nand_ecc_init_ctx(struct nand_device *nand);
  277. void nand_ecc_cleanup_ctx(struct nand_device *nand);
  278. int nand_ecc_prepare_io_req(struct nand_device *nand,
  279. struct nand_page_io_req *req);
  280. int nand_ecc_finish_io_req(struct nand_device *nand,
  281. struct nand_page_io_req *req);
  282. bool nand_ecc_is_strong_enough(struct nand_device *nand);
  283. #if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
  284. int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
  285. int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
  286. #else
  287. static inline int
  288. nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
  289. {
  290. return -ENOTSUPP;
  291. }
  292. static inline int
  293. nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
  294. {
  295. return -ENOTSUPP;
  296. }
  297. #endif
  298. struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
  299. struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
  300. struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
  301. void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
  302. struct device *nand_ecc_get_engine_dev(struct device *host);
  303. #if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
  304. struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
  305. #else
  306. static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
  307. {
  308. return NULL;
  309. }
  310. #endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
  311. #if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
  312. struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
  313. #else
  314. static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
  315. {
  316. return NULL;
  317. }
  318. #endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
  319. /**
  320. * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
  321. * @orig_req: Pointer to the original IO request
  322. * @nand: Related NAND device, to have access to its memory organization
  323. * @page_buffer_size: Real size of the page buffer to use (can be set by the
  324. * user before the tweaking mechanism initialization)
  325. * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
  326. * user before the tweaking mechanism initialization)
  327. * @spare_databuf: Data bounce buffer
  328. * @spare_oobbuf: OOB bounce buffer
  329. * @bounce_data: Flag indicating a data bounce buffer is used
  330. * @bounce_oob: Flag indicating an OOB bounce buffer is used
  331. */
  332. struct nand_ecc_req_tweak_ctx {
  333. struct nand_page_io_req orig_req;
  334. struct nand_device *nand;
  335. unsigned int page_buffer_size;
  336. unsigned int oob_buffer_size;
  337. void *spare_databuf;
  338. void *spare_oobbuf;
  339. bool bounce_data;
  340. bool bounce_oob;
  341. };
  342. int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
  343. struct nand_device *nand);
  344. void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
  345. void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
  346. struct nand_page_io_req *req);
  347. void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
  348. struct nand_page_io_req *req);
  349. /**
  350. * struct nand_ecc - Information relative to the ECC
  351. * @defaults: Default values, depend on the underlying subsystem
  352. * @requirements: ECC requirements from the NAND chip perspective
  353. * @user_conf: User desires in terms of ECC parameters
  354. * @ctx: ECC context for the ECC engine, derived from the device @requirements
  355. * the @user_conf and the @defaults
  356. * @ondie_engine: On-die ECC engine reference, if any
  357. * @engine: ECC engine actually bound
  358. */
  359. struct nand_ecc {
  360. struct nand_ecc_props defaults;
  361. struct nand_ecc_props requirements;
  362. struct nand_ecc_props user_conf;
  363. struct nand_ecc_context ctx;
  364. struct nand_ecc_engine *ondie_engine;
  365. struct nand_ecc_engine *engine;
  366. };
  367. /**
  368. * struct nand_device - NAND device
  369. * @mtd: MTD instance attached to the NAND device
  370. * @memorg: memory layout
  371. * @ecc: NAND ECC object attached to the NAND device
  372. * @rowconv: position to row address converter
  373. * @bbt: bad block table info
  374. * @ops: NAND operations attached to the NAND device
  375. *
  376. * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
  377. * should declare their own NAND object embedding a nand_device struct (that's
  378. * how inheritance is done).
  379. * struct_nand_device->memorg and struct_nand_device->ecc.requirements should
  380. * be filled at device detection time to reflect the NAND device
  381. * capabilities/requirements. Once this is done nanddev_init() can be called.
  382. * It will take care of converting NAND information into MTD ones, which means
  383. * the specialized NAND layers should never manually tweak
  384. * struct_nand_device->mtd except for the ->_read/write() hooks.
  385. */
  386. struct nand_device {
  387. struct mtd_info mtd;
  388. struct nand_memory_organization memorg;
  389. struct nand_ecc ecc;
  390. struct nand_row_converter rowconv;
  391. struct nand_bbt bbt;
  392. const struct nand_ops *ops;
  393. };
  394. /**
  395. * struct nand_io_iter - NAND I/O iterator
  396. * @req: current I/O request
  397. * @oobbytes_per_page: maximum number of OOB bytes per page
  398. * @dataleft: remaining number of data bytes to read/write
  399. * @oobleft: remaining number of OOB bytes to read/write
  400. *
  401. * Can be used by specialized NAND layers to iterate over all pages covered
  402. * by an MTD I/O request, which should greatly simplifies the boiler-plate
  403. * code needed to read/write data from/to a NAND device.
  404. */
  405. struct nand_io_iter {
  406. struct nand_page_io_req req;
  407. unsigned int oobbytes_per_page;
  408. unsigned int dataleft;
  409. unsigned int oobleft;
  410. };
  411. /**
  412. * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
  413. * @mtd: MTD instance
  414. *
  415. * Return: the NAND device embedding @mtd.
  416. */
  417. static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
  418. {
  419. return container_of(mtd, struct nand_device, mtd);
  420. }
  421. /**
  422. * nanddev_to_mtd() - Get the MTD device attached to a NAND device
  423. * @nand: NAND device
  424. *
  425. * Return: the MTD device embedded in @nand.
  426. */
  427. static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
  428. {
  429. return &nand->mtd;
  430. }
  431. /*
  432. * nanddev_bits_per_cell() - Get the number of bits per cell
  433. * @nand: NAND device
  434. *
  435. * Return: the number of bits per cell.
  436. */
  437. static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
  438. {
  439. return nand->memorg.bits_per_cell;
  440. }
  441. /**
  442. * nanddev_page_size() - Get NAND page size
  443. * @nand: NAND device
  444. *
  445. * Return: the page size.
  446. */
  447. static inline size_t nanddev_page_size(const struct nand_device *nand)
  448. {
  449. return nand->memorg.pagesize;
  450. }
  451. /**
  452. * nanddev_per_page_oobsize() - Get NAND OOB size
  453. * @nand: NAND device
  454. *
  455. * Return: the OOB size.
  456. */
  457. static inline unsigned int
  458. nanddev_per_page_oobsize(const struct nand_device *nand)
  459. {
  460. return nand->memorg.oobsize;
  461. }
  462. /**
  463. * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
  464. * @nand: NAND device
  465. *
  466. * Return: the number of pages per eraseblock.
  467. */
  468. static inline unsigned int
  469. nanddev_pages_per_eraseblock(const struct nand_device *nand)
  470. {
  471. return nand->memorg.pages_per_eraseblock;
  472. }
  473. /**
  474. * nanddev_pages_per_target() - Get the number of pages per target
  475. * @nand: NAND device
  476. *
  477. * Return: the number of pages per target.
  478. */
  479. static inline unsigned int
  480. nanddev_pages_per_target(const struct nand_device *nand)
  481. {
  482. return nand->memorg.pages_per_eraseblock *
  483. nand->memorg.eraseblocks_per_lun *
  484. nand->memorg.luns_per_target;
  485. }
  486. /**
  487. * nanddev_per_page_oobsize() - Get NAND erase block size
  488. * @nand: NAND device
  489. *
  490. * Return: the eraseblock size.
  491. */
  492. static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
  493. {
  494. return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
  495. }
  496. /**
  497. * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
  498. * @nand: NAND device
  499. *
  500. * Return: the number of eraseblocks per LUN.
  501. */
  502. static inline unsigned int
  503. nanddev_eraseblocks_per_lun(const struct nand_device *nand)
  504. {
  505. return nand->memorg.eraseblocks_per_lun;
  506. }
  507. /**
  508. * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
  509. * @nand: NAND device
  510. *
  511. * Return: the number of eraseblocks per target.
  512. */
  513. static inline unsigned int
  514. nanddev_eraseblocks_per_target(const struct nand_device *nand)
  515. {
  516. return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
  517. }
  518. /**
  519. * nanddev_target_size() - Get the total size provided by a single target/die
  520. * @nand: NAND device
  521. *
  522. * Return: the total size exposed by a single target/die in bytes.
  523. */
  524. static inline u64 nanddev_target_size(const struct nand_device *nand)
  525. {
  526. return (u64)nand->memorg.luns_per_target *
  527. nand->memorg.eraseblocks_per_lun *
  528. nand->memorg.pages_per_eraseblock *
  529. nand->memorg.pagesize;
  530. }
  531. /**
  532. * nanddev_ntarget() - Get the total of targets
  533. * @nand: NAND device
  534. *
  535. * Return: the number of targets/dies exposed by @nand.
  536. */
  537. static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
  538. {
  539. return nand->memorg.ntargets;
  540. }
  541. /**
  542. * nanddev_neraseblocks() - Get the total number of eraseblocks
  543. * @nand: NAND device
  544. *
  545. * Return: the total number of eraseblocks exposed by @nand.
  546. */
  547. static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
  548. {
  549. return nand->memorg.ntargets * nand->memorg.luns_per_target *
  550. nand->memorg.eraseblocks_per_lun;
  551. }
  552. /**
  553. * nanddev_size() - Get NAND size
  554. * @nand: NAND device
  555. *
  556. * Return: the total size (in bytes) exposed by @nand.
  557. */
  558. static inline u64 nanddev_size(const struct nand_device *nand)
  559. {
  560. return nanddev_target_size(nand) * nanddev_ntargets(nand);
  561. }
  562. /**
  563. * nanddev_get_memorg() - Extract memory organization info from a NAND device
  564. * @nand: NAND device
  565. *
  566. * This can be used by the upper layer to fill the memorg info before calling
  567. * nanddev_init().
  568. *
  569. * Return: the memorg object embedded in the NAND device.
  570. */
  571. static inline struct nand_memory_organization *
  572. nanddev_get_memorg(struct nand_device *nand)
  573. {
  574. return &nand->memorg;
  575. }
  576. /**
  577. * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
  578. * @nand: NAND device
  579. */
  580. static inline const struct nand_ecc_props *
  581. nanddev_get_ecc_conf(struct nand_device *nand)
  582. {
  583. return &nand->ecc.ctx.conf;
  584. }
  585. /**
  586. * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
  587. * @nand: NAND device
  588. */
  589. static inline unsigned int
  590. nanddev_get_ecc_nsteps(struct nand_device *nand)
  591. {
  592. return nand->ecc.ctx.nsteps;
  593. }
  594. /**
  595. * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
  596. * @nand: NAND device
  597. */
  598. static inline unsigned int
  599. nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
  600. {
  601. return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
  602. }
  603. /**
  604. * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
  605. * device
  606. * @nand: NAND device
  607. */
  608. static inline const struct nand_ecc_props *
  609. nanddev_get_ecc_requirements(struct nand_device *nand)
  610. {
  611. return &nand->ecc.requirements;
  612. }
  613. /**
  614. * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
  615. * device
  616. * @nand: NAND device
  617. * @reqs: Requirements
  618. */
  619. static inline void
  620. nanddev_set_ecc_requirements(struct nand_device *nand,
  621. const struct nand_ecc_props *reqs)
  622. {
  623. nand->ecc.requirements = *reqs;
  624. }
  625. int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
  626. struct module *owner);
  627. void nanddev_cleanup(struct nand_device *nand);
  628. /**
  629. * nanddev_register() - Register a NAND device
  630. * @nand: NAND device
  631. *
  632. * Register a NAND device.
  633. * This function is just a wrapper around mtd_device_register()
  634. * registering the MTD device embedded in @nand.
  635. *
  636. * Return: 0 in case of success, a negative error code otherwise.
  637. */
  638. static inline int nanddev_register(struct nand_device *nand)
  639. {
  640. return mtd_device_register(&nand->mtd, NULL, 0);
  641. }
  642. /**
  643. * nanddev_unregister() - Unregister a NAND device
  644. * @nand: NAND device
  645. *
  646. * Unregister a NAND device.
  647. * This function is just a wrapper around mtd_device_unregister()
  648. * unregistering the MTD device embedded in @nand.
  649. *
  650. * Return: 0 in case of success, a negative error code otherwise.
  651. */
  652. static inline int nanddev_unregister(struct nand_device *nand)
  653. {
  654. return mtd_device_unregister(&nand->mtd);
  655. }
  656. /**
  657. * nanddev_set_of_node() - Attach a DT node to a NAND device
  658. * @nand: NAND device
  659. * @np: DT node
  660. *
  661. * Attach a DT node to a NAND device.
  662. */
  663. static inline void nanddev_set_of_node(struct nand_device *nand,
  664. struct device_node *np)
  665. {
  666. mtd_set_of_node(&nand->mtd, np);
  667. }
  668. /**
  669. * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
  670. * @nand: NAND device
  671. *
  672. * Return: the DT node attached to @nand.
  673. */
  674. static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
  675. {
  676. return mtd_get_of_node(&nand->mtd);
  677. }
  678. /**
  679. * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
  680. * @nand: NAND device
  681. * @offs: absolute NAND offset (usually passed by the MTD layer)
  682. * @pos: a NAND position object to fill in
  683. *
  684. * Converts @offs into a nand_pos representation.
  685. *
  686. * Return: the offset within the NAND page pointed by @pos.
  687. */
  688. static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
  689. loff_t offs,
  690. struct nand_pos *pos)
  691. {
  692. unsigned int pageoffs;
  693. u64 tmp = offs;
  694. pageoffs = do_div(tmp, nand->memorg.pagesize);
  695. pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
  696. pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
  697. pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
  698. pos->lun = do_div(tmp, nand->memorg.luns_per_target);
  699. pos->target = tmp;
  700. return pageoffs;
  701. }
  702. /**
  703. * nanddev_pos_cmp() - Compare two NAND positions
  704. * @a: First NAND position
  705. * @b: Second NAND position
  706. *
  707. * Compares two NAND positions.
  708. *
  709. * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
  710. */
  711. static inline int nanddev_pos_cmp(const struct nand_pos *a,
  712. const struct nand_pos *b)
  713. {
  714. if (a->target != b->target)
  715. return a->target < b->target ? -1 : 1;
  716. if (a->lun != b->lun)
  717. return a->lun < b->lun ? -1 : 1;
  718. if (a->eraseblock != b->eraseblock)
  719. return a->eraseblock < b->eraseblock ? -1 : 1;
  720. if (a->page != b->page)
  721. return a->page < b->page ? -1 : 1;
  722. return 0;
  723. }
  724. /**
  725. * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
  726. * @nand: NAND device
  727. * @pos: the NAND position to convert
  728. *
  729. * Converts @pos NAND position into an absolute offset.
  730. *
  731. * Return: the absolute offset. Note that @pos points to the beginning of a
  732. * page, if one wants to point to a specific offset within this page
  733. * the returned offset has to be adjusted manually.
  734. */
  735. static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
  736. const struct nand_pos *pos)
  737. {
  738. unsigned int npages;
  739. npages = pos->page +
  740. ((pos->eraseblock +
  741. (pos->lun +
  742. (pos->target * nand->memorg.luns_per_target)) *
  743. nand->memorg.eraseblocks_per_lun) *
  744. nand->memorg.pages_per_eraseblock);
  745. return (loff_t)npages * nand->memorg.pagesize;
  746. }
  747. /**
  748. * nanddev_pos_to_row() - Extract a row address from a NAND position
  749. * @nand: NAND device
  750. * @pos: the position to convert
  751. *
  752. * Converts a NAND position into a row address that can then be passed to the
  753. * device.
  754. *
  755. * Return: the row address extracted from @pos.
  756. */
  757. static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
  758. const struct nand_pos *pos)
  759. {
  760. return (pos->lun << nand->rowconv.lun_addr_shift) |
  761. (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
  762. pos->page;
  763. }
  764. /**
  765. * nanddev_pos_next_target() - Move a position to the next target/die
  766. * @nand: NAND device
  767. * @pos: the position to update
  768. *
  769. * Updates @pos to point to the start of the next target/die. Useful when you
  770. * want to iterate over all targets/dies of a NAND device.
  771. */
  772. static inline void nanddev_pos_next_target(struct nand_device *nand,
  773. struct nand_pos *pos)
  774. {
  775. pos->page = 0;
  776. pos->plane = 0;
  777. pos->eraseblock = 0;
  778. pos->lun = 0;
  779. pos->target++;
  780. }
  781. /**
  782. * nanddev_pos_next_lun() - Move a position to the next LUN
  783. * @nand: NAND device
  784. * @pos: the position to update
  785. *
  786. * Updates @pos to point to the start of the next LUN. Useful when you want to
  787. * iterate over all LUNs of a NAND device.
  788. */
  789. static inline void nanddev_pos_next_lun(struct nand_device *nand,
  790. struct nand_pos *pos)
  791. {
  792. if (pos->lun >= nand->memorg.luns_per_target - 1)
  793. return nanddev_pos_next_target(nand, pos);
  794. pos->lun++;
  795. pos->page = 0;
  796. pos->plane = 0;
  797. pos->eraseblock = 0;
  798. }
  799. /**
  800. * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
  801. * @nand: NAND device
  802. * @pos: the position to update
  803. *
  804. * Updates @pos to point to the start of the next eraseblock. Useful when you
  805. * want to iterate over all eraseblocks of a NAND device.
  806. */
  807. static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
  808. struct nand_pos *pos)
  809. {
  810. if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
  811. return nanddev_pos_next_lun(nand, pos);
  812. pos->eraseblock++;
  813. pos->page = 0;
  814. pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
  815. }
  816. /**
  817. * nanddev_pos_next_page() - Move a position to the next page
  818. * @nand: NAND device
  819. * @pos: the position to update
  820. *
  821. * Updates @pos to point to the start of the next page. Useful when you want to
  822. * iterate over all pages of a NAND device.
  823. */
  824. static inline void nanddev_pos_next_page(struct nand_device *nand,
  825. struct nand_pos *pos)
  826. {
  827. if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
  828. return nanddev_pos_next_eraseblock(nand, pos);
  829. pos->page++;
  830. }
  831. /**
  832. * nand_io_iter_init - Initialize a NAND I/O iterator
  833. * @nand: NAND device
  834. * @offs: absolute offset
  835. * @req: MTD request
  836. * @iter: NAND I/O iterator
  837. *
  838. * Initializes a NAND iterator based on the information passed by the MTD
  839. * layer.
  840. */
  841. static inline void nanddev_io_iter_init(struct nand_device *nand,
  842. enum nand_page_io_req_type reqtype,
  843. loff_t offs, struct mtd_oob_ops *req,
  844. struct nand_io_iter *iter)
  845. {
  846. struct mtd_info *mtd = nanddev_to_mtd(nand);
  847. iter->req.type = reqtype;
  848. iter->req.mode = req->mode;
  849. iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
  850. iter->req.ooboffs = req->ooboffs;
  851. iter->oobbytes_per_page = mtd_oobavail(mtd, req);
  852. iter->dataleft = req->len;
  853. iter->oobleft = req->ooblen;
  854. iter->req.databuf.in = req->datbuf;
  855. iter->req.datalen = min_t(unsigned int,
  856. nand->memorg.pagesize - iter->req.dataoffs,
  857. iter->dataleft);
  858. iter->req.oobbuf.in = req->oobbuf;
  859. iter->req.ooblen = min_t(unsigned int,
  860. iter->oobbytes_per_page - iter->req.ooboffs,
  861. iter->oobleft);
  862. }
  863. /**
  864. * nand_io_iter_next_page - Move to the next page
  865. * @nand: NAND device
  866. * @iter: NAND I/O iterator
  867. *
  868. * Updates the @iter to point to the next page.
  869. */
  870. static inline void nanddev_io_iter_next_page(struct nand_device *nand,
  871. struct nand_io_iter *iter)
  872. {
  873. nanddev_pos_next_page(nand, &iter->req.pos);
  874. iter->dataleft -= iter->req.datalen;
  875. iter->req.databuf.in += iter->req.datalen;
  876. iter->oobleft -= iter->req.ooblen;
  877. iter->req.oobbuf.in += iter->req.ooblen;
  878. iter->req.dataoffs = 0;
  879. iter->req.ooboffs = 0;
  880. iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
  881. iter->dataleft);
  882. iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
  883. iter->oobleft);
  884. }
  885. /**
  886. * nand_io_iter_end - Should end iteration or not
  887. * @nand: NAND device
  888. * @iter: NAND I/O iterator
  889. *
  890. * Check whether @iter has reached the end of the NAND portion it was asked to
  891. * iterate on or not.
  892. *
  893. * Return: true if @iter has reached the end of the iteration request, false
  894. * otherwise.
  895. */
  896. static inline bool nanddev_io_iter_end(struct nand_device *nand,
  897. const struct nand_io_iter *iter)
  898. {
  899. if (iter->dataleft || iter->oobleft)
  900. return false;
  901. return true;
  902. }
  903. /**
  904. * nand_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O
  905. * request
  906. * @nand: NAND device
  907. * @start: start address to read/write from
  908. * @req: MTD I/O request
  909. * @iter: NAND I/O iterator
  910. *
  911. * Should be used for iterate over pages that are contained in an MTD request.
  912. */
  913. #define nanddev_io_for_each_page(nand, type, start, req, iter) \
  914. for (nanddev_io_iter_init(nand, type, start, req, iter); \
  915. !nanddev_io_iter_end(nand, iter); \
  916. nanddev_io_iter_next_page(nand, iter))
  917. bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
  918. bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
  919. int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
  920. int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
  921. /* ECC related functions */
  922. int nanddev_ecc_engine_init(struct nand_device *nand);
  923. void nanddev_ecc_engine_cleanup(struct nand_device *nand);
/**
 * nand_to_ecc_ctx() - Get the ECC engine private context attached to a NAND
 *		       device
 * @nand: NAND device
 *
 * Return: the opaque pointer stored in @nand->ecc.ctx.priv (ownership and
 *	   lifetime are managed by the ECC engine code, not the caller).
 */
static inline void *nand_to_ecc_ctx(struct nand_device *nand)
{
	return nand->ecc.ctx.priv;
}
  928. /* BBT related functions */
/*
 * Per-eraseblock status values stored in the in-memory bad block table
 * (see nanddev_bbt_get/set_block_status()).
 * NAND_BBT_BLOCK_NUM_STATUS is a count sentinel and must stay last.
 */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};
  937. int nanddev_bbt_init(struct nand_device *nand);
  938. void nanddev_bbt_cleanup(struct nand_device *nand);
  939. int nanddev_bbt_update(struct nand_device *nand);
  940. int nanddev_bbt_get_block_status(const struct nand_device *nand,
  941. unsigned int entry);
  942. int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
  943. enum nand_bbt_block_status status);
  944. int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
  945. /**
  946. * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
  947. * @nand: NAND device
  948. * @pos: the NAND position we want to get BBT entry for
  949. *
  950. * Return the BBT entry used to store information about the eraseblock pointed
  951. * by @pos.
  952. *
  953. * Return: the BBT entry storing information about eraseblock pointed by @pos.
  954. */
  955. static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
  956. const struct nand_pos *pos)
  957. {
  958. return pos->eraseblock +
  959. ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
  960. nand->memorg.eraseblocks_per_lun);
  961. }
  962. /**
  963. * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
  964. * @nand: NAND device
  965. *
  966. * Return: true if the BBT has been initialized, false otherwise.
  967. */
  968. static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
  969. {
  970. return !!nand->bbt.cache;
  971. }
  972. /* MTD -> NAND helper functions. */
  973. int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
  974. int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
  975. #endif /* __LINUX_MTD_NAND_H */