nand_hynix.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Free Electrons
 * Copyright (C) 2017 NextThing Co
 *
 * Author: Boris Brezillon <[email protected]>
 */

#include <linux/sizes.h>
#include <linux/slab.h>

#include "internals.h"

#define NAND_HYNIX_CMD_SET_PARAMS 0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS 0x16

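/*
 * Every value stored in the read-retry OTP area is repeated
 * NAND_HYNIX_1XNM_RR_REPEAT times so that hynix_get_majority() can still
 * recover it when some of the copies are corrupted.
 */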
#define NAND_HYNIX_1XNM_RR_REPEAT 8

/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to set when applying a new read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: array of values to set in registers. The array size is equal to
 *          (nregs * nmodes)
 */
struct hynix_read_retry {
        int nregs;
        const u8 *regs;
        u8 values[];
};

/**
 * struct hynix_nand - private Hynix NAND struct
 * @read_retry: read-retry information
 */
struct hynix_nand {
        const struct hynix_read_retry *read_retry;
};

/**
 * struct hynix_read_retry_otp - structure describing how to access the
 *                               read-retry OTP area
 * @nregs: number of hynix private registers to set before reading the OTP
 *         area
 * @regs: registers that should be configured
 * @values: values that should be set in regs
 * @page: the address to pass to the READ_PAGE command. Depends on the NAND
 *        chip
 * @size: size of the read-retry OTP section
 */
struct hynix_read_retry_otp {
        int nregs;
        const u8 *regs;
        const u8 *values;
        int page;
        int size;
};

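/*
 * NANDs exposing a valid JEDEC ID return the ASCII string "JEDEC" when
 * READID is issued at address 0x40. This is used to tell the two Hynix
 * extended-ID formats apart.
 */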
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
        u8 jedecid[5] = { };
        int ret;

        ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
        if (ret)
                return false;

        return !strncmp("JEDEC", jedecid, sizeof(jedecid));
}

static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
        if (nand_has_exec_op(chip)) {
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(cmd, 0),
                };
                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

                return nand_exec_op(chip, &op);
        }

        chip->legacy.cmdfunc(chip, cmd, -1, -1);

        return 0;
}

static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
        u16 column = ((u16)addr << 8) | addr;

        if (nand_has_exec_op(chip)) {
                struct nand_op_instr instrs[] = {
                        NAND_OP_ADDR(1, &addr, 0),
                        NAND_OP_8BIT_DATA_OUT(1, &val, 0),
                };
                struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

                return nand_exec_op(chip, &op);
        }

        chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
        chip->legacy.write_byte(chip, val);

        return 0;
}

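/*
 * hynix_nand_setup_read_retry() is installed as chip->ops.setup_read_retry
 * by hynix_mlc_1xnm_rr_init(). retry_mode selects one row of
 * hynix->read_retry->values (nregs values per mode).
 */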
static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
        const u8 *values;
        int i, ret;

        values = hynix->read_retry->values +
                 (retry_mode * hynix->read_retry->nregs);

        /* Enter 'Set Hynix Parameters' mode */
        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
        if (ret)
                return ret;

        /*
         * Configure the NAND in the requested read-retry mode.
         * This is done by setting pre-defined values in internal NAND
         * registers.
         *
         * The set of registers is NAND specific, and the values are either
         * predefined or extracted from an OTP area on the NAND (values are
         * probably tweaked at production in this case).
         */
        for (i = 0; i < hynix->read_retry->nregs; i++) {
                ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
                                              values[i]);
                if (ret)
                        return ret;
        }

        /* Apply the new settings. */
        return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}

/**
 * hynix_get_majority - get the value that is occurring the most in a given
 *                      set of values
 * @in: the array of values to test
 * @repeat: the size of the in array
 * @out: pointer used to store the output value
 *
 * This function implements the 'majority check' logic that is supposed to
 * overcome the unreliability of MLC NANDs when reading the OTP area storing
 * the read-retry parameters.
 *
 * It's based on a pretty simple assumption: if we repeat the same value
 * several times and then take the one that is occurring the most, we should
 * find the correct value.
 * Let's hope this dummy algorithm prevents us from losing the read-retry
 * parameters.
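 *
 * For example, with repeat = 8 and
 * in = { 0xa5, 0xa5, 0xff, 0xa5, 0xa5, 0x00, 0xa5, 0xa5 }, the value 0xa5
 * occurs more than repeat / 2 times, so *out is set to 0xa5 and 0 is
 * returned.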
 */
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
        int i, j, half = repeat / 2;

        /*
         * We only test the first half of the in array because we must ensure
         * that the value is at least occurring repeat / 2 times.
         *
         * This loop is suboptimal since we may count the occurrences of the
         * same value several times, but we are doing that on small sets,
         * which makes it acceptable.
         */
        for (i = 0; i < half; i++) {
                int cnt = 0;
                u8 val = in[i];

                /* Count all values that are matching the one at index i. */
                for (j = i + 1; j < repeat; j++) {
                        if (in[j] == val)
                                cnt++;
                }

                /* We found a value occurring more than repeat / 2 times. */
                if (cnt > half) {
                        *out = val;
                        return 0;
                }
        }

        return -EIO;
}

static int hynix_read_rr_otp(struct nand_chip *chip,
                             const struct hynix_read_retry_otp *info,
                             void *buf)
{
        int i, ret;

        ret = nand_reset_op(chip);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
        if (ret)
                return ret;

        for (i = 0; i < info->nregs; i++) {
                ret = hynix_nand_reg_write_op(chip, info->regs[i],
                                              info->values[i]);
                if (ret)
                        return ret;
        }

        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
        if (ret)
                return ret;

        /* Sequence to enter OTP mode? */
        ret = hynix_nand_cmd_op(chip, 0x17);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, 0x4);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, 0x19);
        if (ret)
                return ret;

        /* Now read the page */
        ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
        if (ret)
                return ret;

        /* Put everything back to normal */
        ret = nand_reset_op(chip);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
        if (ret)
                return ret;

        ret = hynix_nand_reg_write_op(chip, 0x38, 0);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
        if (ret)
                return ret;

        return nand_read_page_op(chip, 0, 0, NULL, 0);
}

#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS 8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \
        (16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))

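/*
 * Layout of the read-retry OTP area implied by the offsets above: bytes
 * [0..7] repeat the number of read-retry modes, bytes [8..15] repeat the
 * number of registers per mode, and the data starting at byte 16 holds
 * NAND_HYNIX_1XNM_RR_REPEAT copies of the (nmodes * nregs) value table,
 * each copy stored twice: once as-is and once bit-inverted.
 */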
static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
                                   int mode, int reg, bool inv, u8 *val)
{
        u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
        int val_offs = (mode * nregs) + reg;
        int set_size = nmodes * nregs;
        int i, ret;

        for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
                int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

                tmp[i] = buf[val_offs + set_offs];
        }

        ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
        if (ret)
                return ret;

        if (inv)
                *val = ~*val;

        return 0;
}

static u8 hynix_1xnm_mlc_read_retry_regs[] = {
        0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};

static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
                                  const struct hynix_read_retry_otp *info)
{
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
        struct hynix_read_retry *rr = NULL;
        int ret, i, j;
        u8 nregs, nmodes;
        u8 *buf;

        buf = kmalloc(info->size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = hynix_read_rr_otp(chip, info, buf);
        if (ret)
                goto out;

        ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
                                 &nmodes);
        if (ret)
                goto out;

        ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
                                 NAND_HYNIX_1XNM_RR_REPEAT,
                                 &nregs);
        if (ret)
                goto out;

        rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
        if (!rr) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < nmodes; i++) {
                for (j = 0; j < nregs; j++) {
                        u8 *val = rr->values + (i * nregs) + j;

                        ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
                                                      false, val);
                        if (!ret)
                                continue;

                        ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
                                                      true, val);
                        if (ret)
                                goto out;
                }
        }

        rr->nregs = nregs;
        rr->regs = hynix_1xnm_mlc_read_retry_regs;
        hynix->read_retry = rr;
        chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
        chip->read_retries = nmodes;

out:
        kfree(buf);

        if (ret)
                kfree(rr);

        return ret;
}

static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
        {
                .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
                .regs = hynix_mlc_1xnm_rr_otp_regs,
                .values = hynix_mlc_1xnm_rr_otp_values,
                .page = 0x21f,
                .size = 784
        },
        {
                .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
                .regs = hynix_mlc_1xnm_rr_otp_regs,
                .values = hynix_mlc_1xnm_rr_otp_values,
                .page = 0x200,
                .size = 528,
        },
};

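/*
 * Two OTP layouts are described above; hynix_nand_rr_init() tries them in
 * order and keeps the first one that can be read back consistently.
 */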
static int hynix_nand_rr_init(struct nand_chip *chip)
{
        int i, ret = 0;
        bool valid_jedecid;

        valid_jedecid = hynix_nand_has_valid_jedecid(chip);

        /*
         * We only support read-retry for 1xnm NANDs, and those NANDs all
         * expose a valid JEDEC ID.
         */
        if (valid_jedecid) {
                u8 nand_tech = chip->id.data[5] >> 4;

                /* 1xnm technology */
                if (nand_tech == 4) {
                        for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
                             i++) {
                                /*
                                 * FIXME: Hynix recommend to copy the
                                 * read-retry OTP area into a normal page.
                                 */
                                ret = hynix_mlc_1xnm_rr_init(chip,
                                                hynix_mlc_1xnm_rr_otps + i);
                                if (!ret)
                                        break;
                        }
                }
        }

        if (ret)
                pr_warn("failed to initialize read-retry infrastructure\n");

        return 0;
}

static void hynix_nand_extract_oobsize(struct nand_chip *chip,
                                       bool valid_jedecid)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct nand_memory_organization *memorg;
        u8 oobsize;

        memorg = nanddev_get_memorg(&chip->base);

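        /*
         * The OOB size is encoded on 3 bits spread over ID byte 3: bits 2-3
         * provide the two LSBs and bit 6 the MSB.
         */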
        oobsize = ((chip->id.data[3] >> 2) & 0x3) |
                  ((chip->id.data[3] >> 4) & 0x4);

        if (valid_jedecid) {
                switch (oobsize) {
                case 0:
                        memorg->oobsize = 2048;
                        break;
                case 1:
                        memorg->oobsize = 1664;
                        break;
                case 2:
                        memorg->oobsize = 1024;
                        break;
                case 3:
                        memorg->oobsize = 640;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid OOB size");
                        break;
                }
        } else {
                switch (oobsize) {
                case 0:
                        memorg->oobsize = 128;
                        break;
                case 1:
                        memorg->oobsize = 224;
                        break;
                case 2:
                        memorg->oobsize = 448;
                        break;
                case 3:
                        memorg->oobsize = 64;
                        break;
                case 4:
                        memorg->oobsize = 32;
                        break;
                case 5:
                        memorg->oobsize = 16;
                        break;
                case 6:
                        memorg->oobsize = 640;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid OOB size");
                        break;
                }

                /*
                 * The datasheet of H27UCG8T2BTR mentions that the "Redundant
                 * Area Size" is encoded "per 8KB" (page size). This chip uses
                 * a page size of 16KiB. The datasheet mentions an OOB size of
                 * 1,280 bytes, but the OOB size encoded in the ID bytes (using
                 * the existing logic above) is 640 bytes.
                 * Update the OOB size for this chip by taking the value
                 * determined above and scaling it to the actual page size (so
                 * the actual OOB size for this chip is: 640 * 16k / 8k).
                 */
                if (chip->id.data[1] == 0xde)
                        memorg->oobsize *= memorg->pagesize / SZ_8K;
        }

        mtd->oobsize = memorg->oobsize;
}

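/*
 * The ECC requirements are derived from bits 4-6 of ID byte 4. Their
 * meaning depends on whether the chip exposes a valid JEDEC ID and, for the
 * older ID format, on the process node encoded in ID byte 5.
 */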
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
                                                bool valid_jedecid)
{
        struct nand_device *base = &chip->base;
        struct nand_ecc_props requirements = {};
        u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

        if (valid_jedecid) {
                /* Reference: H27UCG8T2E datasheet */
                requirements.step_size = 1024;

                switch (ecc_level) {
                case 0:
                        requirements.step_size = 0;
                        requirements.strength = 0;
                        break;
                case 1:
                        requirements.strength = 4;
                        break;
                case 2:
                        requirements.strength = 24;
                        break;
                case 3:
                        requirements.strength = 32;
                        break;
                case 4:
                        requirements.strength = 40;
                        break;
                case 5:
                        requirements.strength = 50;
                        break;
                case 6:
                        requirements.strength = 60;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid ECC requirements");
                }
        } else {
                /*
                 * The ECC requirements field meaning depends on the
                 * NAND technology.
                 */
                u8 nand_tech = chip->id.data[5] & 0x7;

                if (nand_tech < 3) {
                        /* > 26nm, reference: H27UBG8T2A datasheet */
                        if (ecc_level < 5) {
                                requirements.step_size = 512;
                                requirements.strength = 1 << ecc_level;
                        } else if (ecc_level < 7) {
                                if (ecc_level == 5)
                                        requirements.step_size = 2048;
                                else
                                        requirements.step_size = 1024;
                                requirements.strength = 24;
                        } else {
                                /*
                                 * We should never reach this case, but if that
                                 * happens, this probably means Hynix decided
                                 * to use a different extended ID format, and
                                 * we should find a way to support it.
                                 */
                                WARN(1, "Invalid ECC requirements");
                        }
                } else {
                        /* <= 26nm, reference: H27UBG8T2B datasheet */
                        if (!ecc_level) {
                                requirements.step_size = 0;
                                requirements.strength = 0;
                        } else if (ecc_level < 5) {
                                requirements.step_size = 512;
                                requirements.strength = 1 << (ecc_level - 1);
                        } else {
                                requirements.step_size = 1024;
                                requirements.strength = 24 +
                                                        (8 * (ecc_level - 5));
                        }
                }
        }

        nanddev_set_ecc_requirements(base, &requirements);
}

static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
                                                        bool valid_jedecid)
{
        u8 nand_tech;

        /* We need scrambling on all TLC NANDs */
        if (nanddev_bits_per_cell(&chip->base) > 2)
                chip->options |= NAND_NEED_SCRAMBLING;

        /* And on MLC NANDs with sub-3xnm process */
        if (valid_jedecid) {
                nand_tech = chip->id.data[5] >> 4;

                /* < 3xnm */
                if (nand_tech > 0)
                        chip->options |= NAND_NEED_SCRAMBLING;
        } else {
                nand_tech = chip->id.data[5] & 0x7;

                /* < 32nm */
                if (nand_tech > 2)
                        chip->options |= NAND_NEED_SCRAMBLING;
        }
}

static void hynix_nand_decode_id(struct nand_chip *chip)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct nand_memory_organization *memorg;
        bool valid_jedecid;
        u8 tmp;

        memorg = nanddev_get_memorg(&chip->base);

        /*
         * Exclude all SLC NANDs from this advanced detection scheme.
         * According to the ranges defined in several datasheets, it might
         * appear that even SLC NANDs could fall in this extended ID scheme.
         * If that is the case, rework the test to let SLC NANDs go through
         * the detection process.
         */
        if (chip->id.len < 6 || nand_is_slc(chip)) {
                nand_decode_ext_id(chip);
                return;
        }

        /* Extract pagesize */
        memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
        mtd->writesize = memorg->pagesize;

        tmp = (chip->id.data[3] >> 4) & 0x3;
        /*
         * When bit7 is set that means we start counting at 1MiB, otherwise
         * we start counting at 128KiB and shift this value by the content of
         * ID[3][4:5].
         * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
         * this case the erasesize is set to 768KiB.
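         *
         * For example, ID[3] = 0x91 (bit7 set, ID[3][4:5] = 1) yields an
         * erasesize of 2MiB, while ID[3] = 0x11 (bit7 cleared) yields 256KiB.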
         */
        if (chip->id.data[3] & 0x80) {
                memorg->pages_per_eraseblock = (SZ_1M << tmp) /
                                               memorg->pagesize;
                mtd->erasesize = SZ_1M << tmp;
        } else if (tmp == 3) {
                memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
                                               memorg->pagesize;
                mtd->erasesize = SZ_512K + SZ_256K;
        } else {
                memorg->pages_per_eraseblock = (SZ_128K << tmp) /
                                               memorg->pagesize;
                mtd->erasesize = SZ_128K << tmp;
        }

        /*
         * Modern Toggle DDR NANDs have a valid JEDECID even though they are
         * not exposing a valid JEDEC parameter table.
         * These NANDs use a different NAND ID scheme.
         */
        valid_jedecid = hynix_nand_has_valid_jedecid(chip);

        hynix_nand_extract_oobsize(chip, valid_jedecid);
        hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
        hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}

static void hynix_nand_cleanup(struct nand_chip *chip)
{
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);

        if (!hynix)
                return;

        kfree(hynix->read_retry);
        kfree(hynix);
        nand_set_manufacturer_data(chip, NULL);
}

static int
h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
                                       struct nand_interface_config *iface)
{
        onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);

        return nand_choose_best_sdr_timings(chip, iface, NULL);
}

static int h27ucg8t2etrbc_init(struct nand_chip *chip)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        chip->options |= NAND_NEED_SCRAMBLING;
        mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);

        return 0;
}

static int hynix_nand_init(struct nand_chip *chip)
{
        struct hynix_nand *hynix;
        int ret;

        if (!nand_is_slc(chip))
                chip->options |= NAND_BBM_LASTPAGE;
        else
                chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

        hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
        if (!hynix)
                return -ENOMEM;

        nand_set_manufacturer_data(chip, hynix);

        if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
                     sizeof("H27UCG8T2ATR-BC") - 1))
                chip->ops.choose_interface_config =
                        h27ucg8t2atrbc_choose_interface_config;

        if (!strncmp("H27UCG8T2ETR-BC", chip->parameters.model,
                     sizeof("H27UCG8T2ETR-BC") - 1))
                h27ucg8t2etrbc_init(chip);

        ret = hynix_nand_rr_init(chip);
        if (ret)
                hynix_nand_cleanup(chip);

        return ret;
}

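/*
 * Manufacturer ops hooked up by the core NAND ID detection code when the
 * manufacturer ID byte identifies an SK Hynix chip.
 */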
const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
        .detect = hynix_nand_decode_id,
        .init = hynix_nand_init,
        .cleanup = hynix_nand_cleanup,
};