  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright (c) 2016-2017 Micron Technology, Inc.
  4. *
  5. * Authors:
  6. * Peter Pan <[email protected]>
  7. */
  8. #ifndef __LINUX_MTD_SPINAND_H
  9. #define __LINUX_MTD_SPINAND_H
  10. #include <linux/mutex.h>
  11. #include <linux/bitops.h>
  12. #include <linux/device.h>
  13. #include <linux/mtd/mtd.h>
  14. #include <linux/mtd/nand.h>
  15. #include <linux/spi/spi.h>
  16. #include <linux/spi/spi-mem.h>
  17. /**
  18. * Standard SPI NAND flash operations
  19. */
  20. #define SPINAND_RESET_OP \
  21. SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
  22. SPI_MEM_OP_NO_ADDR, \
  23. SPI_MEM_OP_NO_DUMMY, \
  24. SPI_MEM_OP_NO_DATA)
  25. #define SPINAND_WR_EN_DIS_OP(enable) \
  26. SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
  27. SPI_MEM_OP_NO_ADDR, \
  28. SPI_MEM_OP_NO_DUMMY, \
  29. SPI_MEM_OP_NO_DATA)
  30. #define SPINAND_READID_OP(naddr, ndummy, buf, len) \
  31. SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
  32. SPI_MEM_OP_ADDR(naddr, 0, 1), \
  33. SPI_MEM_OP_DUMMY(ndummy, 1), \
  34. SPI_MEM_OP_DATA_IN(len, buf, 1))
  35. #define SPINAND_SET_FEATURE_OP(reg, valptr) \
  36. SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
  37. SPI_MEM_OP_ADDR(1, reg, 1), \
  38. SPI_MEM_OP_NO_DUMMY, \
  39. SPI_MEM_OP_DATA_OUT(1, valptr, 1))
  40. #define SPINAND_GET_FEATURE_OP(reg, valptr) \
  41. SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
  42. SPI_MEM_OP_ADDR(1, reg, 1), \
  43. SPI_MEM_OP_NO_DUMMY, \
  44. SPI_MEM_OP_DATA_IN(1, valptr, 1))
  45. #define SPINAND_BLK_ERASE_OP(addr) \
  46. SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
  47. SPI_MEM_OP_ADDR(3, addr, 1), \
  48. SPI_MEM_OP_NO_DUMMY, \
  49. SPI_MEM_OP_NO_DATA)
  50. #define SPINAND_PAGE_READ_OP(addr) \
  51. SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
  52. SPI_MEM_OP_ADDR(3, addr, 1), \
  53. SPI_MEM_OP_NO_DUMMY, \
  54. SPI_MEM_OP_NO_DATA)
  55. #define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
  56. SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
  57. SPI_MEM_OP_ADDR(2, addr, 1), \
  58. SPI_MEM_OP_DUMMY(ndummy, 1), \
  59. SPI_MEM_OP_DATA_IN(len, buf, 1))
  60. #define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
  61. SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
  62. SPI_MEM_OP_ADDR(3, addr, 1), \
  63. SPI_MEM_OP_DUMMY(ndummy, 1), \
  64. SPI_MEM_OP_DATA_IN(len, buf, 1))
  65. #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
  66. SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
  67. SPI_MEM_OP_ADDR(2, addr, 1), \
  68. SPI_MEM_OP_DUMMY(ndummy, 1), \
  69. SPI_MEM_OP_DATA_IN(len, buf, 2))
  70. #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
  71. SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
  72. SPI_MEM_OP_ADDR(3, addr, 1), \
  73. SPI_MEM_OP_DUMMY(ndummy, 1), \
  74. SPI_MEM_OP_DATA_IN(len, buf, 2))
  75. #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
  76. SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
  77. SPI_MEM_OP_ADDR(2, addr, 1), \
  78. SPI_MEM_OP_DUMMY(ndummy, 1), \
  79. SPI_MEM_OP_DATA_IN(len, buf, 4))
  80. #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
  81. SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
  82. SPI_MEM_OP_ADDR(3, addr, 1), \
  83. SPI_MEM_OP_DUMMY(ndummy, 1), \
  84. SPI_MEM_OP_DATA_IN(len, buf, 4))
  85. #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
  86. SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
  87. SPI_MEM_OP_ADDR(2, addr, 2), \
  88. SPI_MEM_OP_DUMMY(ndummy, 2), \
  89. SPI_MEM_OP_DATA_IN(len, buf, 2))
  90. #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
  91. SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
  92. SPI_MEM_OP_ADDR(3, addr, 2), \
  93. SPI_MEM_OP_DUMMY(ndummy, 2), \
  94. SPI_MEM_OP_DATA_IN(len, buf, 2))
  95. #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
  96. SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
  97. SPI_MEM_OP_ADDR(2, addr, 4), \
  98. SPI_MEM_OP_DUMMY(ndummy, 4), \
  99. SPI_MEM_OP_DATA_IN(len, buf, 4))
  100. #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
  101. SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
  102. SPI_MEM_OP_ADDR(3, addr, 4), \
  103. SPI_MEM_OP_DUMMY(ndummy, 4), \
  104. SPI_MEM_OP_DATA_IN(len, buf, 4))
  105. #define SPINAND_PROG_EXEC_OP(addr) \
  106. SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
  107. SPI_MEM_OP_ADDR(3, addr, 1), \
  108. SPI_MEM_OP_NO_DUMMY, \
  109. SPI_MEM_OP_NO_DATA)
  110. #define SPINAND_PROG_LOAD(reset, addr, buf, len) \
  111. SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
  112. SPI_MEM_OP_ADDR(2, addr, 1), \
  113. SPI_MEM_OP_NO_DUMMY, \
  114. SPI_MEM_OP_DATA_OUT(len, buf, 1))
  115. #define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
  116. SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
  117. SPI_MEM_OP_ADDR(2, addr, 1), \
  118. SPI_MEM_OP_NO_DUMMY, \
  119. SPI_MEM_OP_DATA_OUT(len, buf, 4))
/*
 * Standard SPI NAND flash commands
 */
#define SPINAND_CMD_PROG_LOAD_X4		0x32
#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4	0x34

/* feature register */
#define REG_BLOCK_LOCK		0xa0
#define BL_ALL_UNLOCKED		0x00

/* configuration register */
#define REG_CFG			0xb0
#define CFG_OTP_ENABLE		BIT(6)
#define CFG_ECC_ENABLE		BIT(4)
#define CFG_QUAD_ENABLE		BIT(0)

/* status register */
#define REG_STATUS		0xc0
#define STATUS_BUSY		BIT(0)
#define STATUS_ERASE_FAILED	BIT(2)
#define STATUS_PROG_FAILED	BIT(3)
/* bits 5:4 encode the ECC status of the last read operation */
#define STATUS_ECC_MASK		GENMASK(5, 4)
#define STATUS_ECC_NO_BITFLIPS	(0 << 4)
#define STATUS_ECC_HAS_BITFLIPS	(1 << 4)
#define STATUS_ECC_UNCOR_ERROR	(2 << 4)

struct spinand_op;
struct spinand_device;

#define SPINAND_MAX_ID_LEN	4
/*
 * For erase, write and read operation, we got the following timings :
 * tBERS (erase) 1ms to 4ms
 * tPROG 300us to 400us
 * tREAD 25us to 100us
 * In order to minimize latency, the min value is divided by 4 for the
 * initial delay, and dividing by 20 for the poll delay.
 * For reset, 5us/10us/500us if the device is respectively
 * reading/programming/erasing when the RESET occurs. Since we always
 * issue a RESET when the device is IDLE, 5us is selected for both initial
 * and poll delay.
 */
#define SPINAND_READ_INITIAL_DELAY_US	6
#define SPINAND_READ_POLL_DELAY_US	5
#define SPINAND_RESET_INITIAL_DELAY_US	5
#define SPINAND_RESET_POLL_DELAY_US	5
#define SPINAND_WRITE_INITIAL_DELAY_US	75
#define SPINAND_WRITE_POLL_DELAY_US	15
#define SPINAND_ERASE_INITIAL_DELAY_US	250
#define SPINAND_ERASE_POLL_DELAY_US	50
#define SPINAND_WAITRDY_TIMEOUT_MS	400
/**
 * struct spinand_id - SPI NAND id structure
 * @data: buffer containing the id bytes. Currently 4 bytes large, but can
 *	  be extended if required
 * @len: ID length
 */
struct spinand_id {
	u8 data[SPINAND_MAX_ID_LEN];
	int len;
};

/**
 * enum spinand_readid_method - READ_ID command variants
 * @SPINAND_READID_METHOD_OPCODE: chip id is returned immediately after the
 *				  read_id opcode
 * @SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after the read_id
 *				       opcode plus a 1-byte address
 * @SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after the read_id
 *					opcode plus one dummy byte
 */
enum spinand_readid_method {
	SPINAND_READID_METHOD_OPCODE,
	SPINAND_READID_METHOD_OPCODE_ADDR,
	SPINAND_READID_METHOD_OPCODE_DUMMY,
};
/**
 * struct spinand_devid - SPI NAND device id structure
 * @id: device id of current chip
 * @len: number of bytes in device id
 * @method: method to read chip id, one of enum spinand_readid_method:
 *	    SPINAND_READID_METHOD_OPCODE: chip id is returned immediately
 *	    after read_id opcode.
 *	    SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after
 *	    read_id opcode + 1-byte address.
 *	    SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after
 *	    read_id opcode + 1 dummy byte.
 */
struct spinand_devid {
	const u8 *id;
	const u8 len;
	const enum spinand_readid_method method;
};
/**
 * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
 * @init: initialize a SPI NAND device
 * @cleanup: cleanup a SPI NAND device
 *
 * Each SPI NAND manufacturer driver should implement this interface so that
 * NAND chips coming from this vendor can be initialized properly.
 */
struct spinand_manufacturer_ops {
	int (*init)(struct spinand_device *spinand);
	void (*cleanup)(struct spinand_device *spinand);
};
/**
 * struct spinand_manufacturer - SPI NAND manufacturer instance
 * @id: manufacturer ID
 * @name: manufacturer name
 * @chips: supported SPI NANDs under current manufacturer
 * @nchips: number of SPI NANDs available in chips array
 * @ops: manufacturer operations
 */
struct spinand_manufacturer {
	u8 id;
	char *name;
	const struct spinand_info *chips;
	const size_t nchips;
	const struct spinand_manufacturer_ops *ops;
};
  227. /* SPI NAND manufacturers */
  228. extern const struct spinand_manufacturer ato_spinand_manufacturer;
  229. extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
  230. extern const struct spinand_manufacturer macronix_spinand_manufacturer;
  231. extern const struct spinand_manufacturer micron_spinand_manufacturer;
  232. extern const struct spinand_manufacturer paragon_spinand_manufacturer;
  233. extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
  234. extern const struct spinand_manufacturer winbond_spinand_manufacturer;
  235. extern const struct spinand_manufacturer xtx_spinand_manufacturer;
/**
 * struct spinand_op_variants - SPI NAND operation variants
 * @ops: the list of variants for a given operation
 * @nops: the number of variants
 *
 * Some operations like read-from-cache/write-to-cache have several variants
 * depending on the number of IO lines you use to transfer data or address
 * cycles. This structure is a way to describe the different variants supported
 * by a chip and let the core pick the best one based on the SPI mem controller
 * capabilities.
 */
struct spinand_op_variants {
	const struct spi_mem_op *ops;
	unsigned int nops;
};

/*
 * Define a named, const spinand_op_variants object from a list of
 * spi_mem_op templates; nops is derived at compile time from the number
 * of entries in the list.
 */
#define SPINAND_OP_VARIANTS(name, ...) \
	const struct spinand_op_variants name = { \
		.ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
		.nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
			sizeof(struct spi_mem_op), \
	}
/**
 * struct spinand_ecc_info - description of the on-die ECC implemented by a
 *			     SPI NAND chip
 * @get_status: get the ECC status. Should return a positive number encoding
 *		the number of corrected bitflips if correction was possible or
 *		-EBADMSG if there are uncorrectable errors. It can also return
 *		other negative error codes if the error is not caused by
 *		uncorrectable bitflips
 * @ooblayout: the OOB layout used by the on-die ECC implementation
 */
struct spinand_ecc_info {
	int (*get_status)(struct spinand_device *spinand, u8 status);
	const struct mtd_ooblayout_ops *ooblayout;
};

/* Chip-level feature flags, OR-ed into spinand_info/spinand_device->flags */
#define SPINAND_HAS_QE_BIT		BIT(0)
#define SPINAND_HAS_CR_FEAT_BIT		BIT(1)

/**
 * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
 * @status: status of the last wait operation that will be used in case
 *	    ->get_status() is not populated by the spinand device.
 */
struct spinand_ondie_ecc_conf {
	u8 status;
};
/**
 * struct spinand_info - Structure used to describe SPI NAND chips
 * @model: model name
 * @devid: device ID
 * @flags: OR-ing of the SPINAND_XXX flags
 * @memorg: memory organization
 * @eccreq: ECC requirements
 * @eccinfo: on-die ECC info
 * @op_variants: operations variants
 * @op_variants.read_cache: variants of the read-cache operation
 * @op_variants.write_cache: variants of the write-cache operation
 * @op_variants.update_cache: variants of the update-cache operation
 * @select_target: function used to select a target/die. Required only for
 *		   multi-die chips
 *
 * Each SPI NAND manufacturer driver should have a spinand_info table
 * describing all the chips supported by the driver.
 */
struct spinand_info {
	const char *model;
	struct spinand_devid devid;
	u32 flags;
	struct nand_memory_organization memorg;
	struct nand_ecc_props eccreq;
	struct spinand_ecc_info eccinfo;
	struct {
		const struct spinand_op_variants *read_cache;
		const struct spinand_op_variants *write_cache;
		const struct spinand_op_variants *update_cache;
	} op_variants;
	int (*select_target)(struct spinand_device *spinand,
			     unsigned int target);
};

/* Initialize a spinand_devid: len is derived from the id byte list */
#define SPINAND_ID(__method, ...) \
	{ \
		.id = (const u8[]){ __VA_ARGS__ }, \
		.len = sizeof((u8[]){ __VA_ARGS__ }), \
		.method = __method, \
	}

/* Initialize the op_variants member of a spinand_info entry */
#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
	{ \
		.read_cache = __read, \
		.write_cache = __write, \
		.update_cache = __update, \
	}

/* Designated initializer for the eccinfo member of a spinand_info entry */
#define SPINAND_ECCINFO(__ooblayout, __get_status) \
	.eccinfo = { \
		.ooblayout = __ooblayout, \
		.get_status = __get_status, \
	}

/* Designated initializer for the select_target member (multi-die chips) */
#define SPINAND_SELECT_TARGET(__func) \
	.select_target = __func,

/*
 * Initialize one spinand_info table entry. Optional trailing arguments
 * (e.g. SPINAND_ECCINFO(), SPINAND_SELECT_TARGET()) are designated
 * initializers appended to the structure initializer.
 */
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
		     __flags, ...) \
	{ \
		.model = __model, \
		.devid = __id, \
		.memorg = __memorg, \
		.eccreq = __eccreq, \
		.op_variants = __op_variants, \
		.flags = __flags, \
		__VA_ARGS__ \
	}
/**
 * struct spinand_dirmap - SPI mem direct mapping descriptors for one die
 * @wdesc: dirmap descriptor for the write direction
 * @rdesc: dirmap descriptor for the read direction
 * @wdesc_ecc: write-direction descriptor — presumably used when on-die ECC
 *	       is enabled, as opposed to @wdesc; confirm against the core code
 * @rdesc_ecc: read-direction counterpart of @wdesc_ecc
 */
struct spinand_dirmap {
	struct spi_mem_dirmap_desc *wdesc;
	struct spi_mem_dirmap_desc *rdesc;
	struct spi_mem_dirmap_desc *wdesc_ecc;
	struct spi_mem_dirmap_desc *rdesc_ecc;
};
/**
 * struct spinand_device - SPI NAND device instance
 * @base: NAND device instance
 * @spimem: pointer to the SPI mem object
 * @lock: lock used to serialize accesses to the NAND
 * @id: NAND ID as returned by READ_ID
 * @flags: NAND flags
 * @op_templates: various SPI mem op templates
 * @op_templates.read_cache: read cache op template
 * @op_templates.write_cache: write cache op template
 * @op_templates.update_cache: update cache op template
 * @dirmaps: SPI mem direct mapping descriptors
 * @select_target: select a specific target/die. Usually called before sending
 *		   a command addressing a page or an eraseblock embedded in
 *		   this die. Only required if your chip exposes several dies
 * @cur_target: currently selected target/die
 * @eccinfo: on-die ECC information
 * @cfg_cache: config register cache. One entry per die
 * @databuf: bounce buffer for data
 * @oobbuf: bounce buffer for OOB data
 * @scratchbuf: buffer used for everything but page accesses. This is needed
 *		because the spi-mem interface explicitly requests that buffers
 *		passed in spi_mem_op be DMA-able, so we can't based the bufs on
 *		the stack
 * @manufacturer: SPI NAND manufacturer information
 * @priv: manufacturer private data
 */
struct spinand_device {
	struct nand_device base;
	struct spi_mem *spimem;
	struct mutex lock;
	struct spinand_id id;
	u32 flags;
	struct {
		const struct spi_mem_op *read_cache;
		const struct spi_mem_op *write_cache;
		const struct spi_mem_op *update_cache;
	} op_templates;
	struct spinand_dirmap *dirmaps;
	int (*select_target)(struct spinand_device *spinand,
			     unsigned int target);
	unsigned int cur_target;
	struct spinand_ecc_info eccinfo;
	u8 *cfg_cache;
	u8 *databuf;
	u8 *oobbuf;
	u8 *scratchbuf;
	const struct spinand_manufacturer *manufacturer;
	void *priv;
};
  399. /**
  400. * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
  401. * @mtd: MTD instance
  402. *
  403. * Return: the SPI NAND device attached to @mtd.
  404. */
  405. static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
  406. {
  407. return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
  408. }
  409. /**
  410. * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
  411. * @spinand: SPI NAND device
  412. *
  413. * Return: the MTD device embedded in @spinand.
  414. */
  415. static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
  416. {
  417. return nanddev_to_mtd(&spinand->base);
  418. }
  419. /**
  420. * nand_to_spinand() - Get the SPI NAND device embedding an NAND object
  421. * @nand: NAND object
  422. *
  423. * Return: the SPI NAND device embedding @nand.
  424. */
  425. static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
  426. {
  427. return container_of(nand, struct spinand_device, base);
  428. }
  429. /**
  430. * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
  431. * @spinand: SPI NAND device
  432. *
  433. * Return: the NAND device embedded in @spinand.
  434. */
  435. static inline struct nand_device *
  436. spinand_to_nand(struct spinand_device *spinand)
  437. {
  438. return &spinand->base;
  439. }
  440. /**
  441. * spinand_set_of_node - Attach a DT node to a SPI NAND device
  442. * @spinand: SPI NAND device
  443. * @np: DT node
  444. *
  445. * Attach a DT node to a SPI NAND device.
  446. */
  447. static inline void spinand_set_of_node(struct spinand_device *spinand,
  448. struct device_node *np)
  449. {
  450. nanddev_set_of_node(&spinand->base, np);
  451. }
/* Core helpers exported to SPI NAND manufacturer drivers */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
  458. #endif /* __LINUX_MTD_SPINAND_H */