nand_hynix.c

/*
 * Copyright (C) 2017 Free Electrons
 * Copyright (C) 2017 NextThing Co
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/sizes.h>
#include <linux/slab.h>

#include "internals.h"

#define NAND_HYNIX_CMD_SET_PARAMS	0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS	0x16

#define NAND_HYNIX_1XNM_RR_REPEAT	8
/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to set when applying a new read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: array of values to set in registers. The array size is equal to
 *	    (nregs * nmodes)
 */
struct hynix_read_retry {
	int nregs;
	const u8 *regs;
	u8 values[0];
};
/**
 * struct hynix_nand - private Hynix NAND struct
 * @read_retry: read-retry information
 */
struct hynix_nand {
	const struct hynix_read_retry *read_retry;
};
/**
 * struct hynix_read_retry_otp - structure describing how to access the
 *				 read-retry OTP area
 * @nregs: number of Hynix private registers to set before reading the OTP
 *	   area
 * @regs: registers that should be configured
 * @values: values that should be set in regs
 * @page: the address to pass to the READ_PAGE command. Depends on the NAND
 *	  chip
 * @size: size of the read-retry OTP section
 */
struct hynix_read_retry_otp {
	int nregs;
	const u8 *regs;
	const u8 *values;
	int page;
	int size;
};
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
	u8 jedecid[5] = { };
	int ret;

	ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
	if (ret)
		return false;

	return !strncmp("JEDEC", jedecid, sizeof(jedecid));
}
static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(cmd, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, cmd, -1, -1);

	return 0;
}
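
/*
 * Hynix parameter registers are written with a single address cycle (the
 * register offset) followed by a single data byte. On the legacy path the
 * offset is replicated in both bytes of the column passed to ->cmdfunc().
 */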
static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
	u16 column = ((u16)addr << 8) | addr;

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_ADDR(1, &addr, 0),
			NAND_OP_8BIT_DATA_OUT(1, &val, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
	chip->legacy.write_byte(chip, val);

	return 0;
}
static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	const u8 *values;
	int i, ret;

	values = hynix->read_retry->values +
		 (retry_mode * hynix->read_retry->nregs);

	/* Enter 'Set Hynix Parameters' mode */
	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	/*
	 * Configure the NAND in the requested read-retry mode.
	 * This is done by setting pre-defined values in internal NAND
	 * registers.
	 *
	 * The set of registers is NAND specific, and the values are either
	 * predefined or extracted from an OTP area on the NAND (values are
	 * probably tweaked at production in this case).
	 */
	for (i = 0; i < hynix->read_retry->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
					      values[i]);
		if (ret)
			return ret;
	}

	/* Apply the new settings. */
	return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
/**
 * hynix_get_majority - get the value that is occurring the most in a given
 *			set of values
 * @in: the array of values to test
 * @repeat: the size of the in array
 * @out: pointer used to store the output value
 *
 * This function implements the 'majority check' logic that is supposed to
 * overcome the unreliability of MLC NANDs when reading the OTP area storing
 * the read-retry parameters.
 *
 * It's based on a pretty simple assumption: if we repeat the same value
 * several times and then take the one that is occurring the most, we should
 * find the correct value.
 * Let's hope this naive algorithm prevents us from losing the read-retry
 * parameters.
 */
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
	int i, j, half = repeat / 2;

	/*
	 * We only test the first half of the in array because we must ensure
	 * that the value is at least occurring repeat / 2 times.
	 *
	 * This loop is suboptimal since we may count the occurrences of the
	 * same value several times, but we are doing that on small sets,
	 * which makes it acceptable.
	 */
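	/*
	 * Example: with repeat = 8 and in = { 0xaa, 0xaa, 0x2a, 0xaa, 0xaa,
	 * 0xaa, 0xaa, 0xaa }, 0xaa is seen more than repeat / 2 = 4 times,
	 * so *out ends up being 0xaa.
	 */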
	for (i = 0; i < half; i++) {
		int cnt = 0;
		u8 val = in[i];

		/* Count all values that are matching the one at index i. */
		for (j = i + 1; j < repeat; j++) {
			if (in[j] == val)
				cnt++;
		}

		/* We found a value occurring more than repeat / 2. */
		if (cnt > half) {
			*out = val;
			return 0;
		}
	}

	return -EIO;
}
static int hynix_read_rr_otp(struct nand_chip *chip,
			     const struct hynix_read_retry_otp *info,
			     void *buf)
{
	int i, ret;

	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	for (i = 0; i < info->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, info->regs[i],
					      info->values[i]);
		if (ret)
			return ret;
	}

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	/* Sequence to enter OTP mode? */
	ret = hynix_nand_cmd_op(chip, 0x17);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x4);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x19);
	if (ret)
		return ret;

	/* Now read the page */
	ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
	if (ret)
		return ret;

	/* Put everything back to normal */
	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	ret = hynix_nand_reg_write_op(chip, 0x38, 0);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	return nand_read_page_op(chip, 0, 0, NULL, 0);
}
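
/*
 * Layout of the read-retry OTP page, as implied by the offsets below: the
 * number of read-retry modes is repeated NAND_HYNIX_1XNM_RR_REPEAT times at
 * offset 0, the number of registers per mode is repeated the same way at
 * offset 8, and the parameter sets themselves start at offset 16 as eight
 * {normal copy, inverted copy} pairs of (nmodes * nregs) bytes each.
 */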
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS			0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS		8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \
	(16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))

static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
				   int mode, int reg, bool inv, u8 *val)
{
	u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
	int val_offs = (mode * nregs) + reg;
	int set_size = nmodes * nregs;
	int i, ret;

	for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
		int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

		tmp[i] = buf[val_offs + set_offs];
	}

	ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
	if (ret)
		return ret;

	if (inv)
		*val = ~*val;

	return 0;
}
static u8 hynix_1xnm_mlc_read_retry_regs[] = {
	0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};

static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
				  const struct hynix_read_retry_otp *info)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	struct hynix_read_retry *rr = NULL;
	int ret, i, j;
	u8 nregs, nmodes;
	u8 *buf;

	buf = kmalloc(info->size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hynix_read_rr_otp(chip, info, buf);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
				 &nmodes);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
				 NAND_HYNIX_1XNM_RR_REPEAT,
				 &nregs);
	if (ret)
		goto out;

	rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
	if (!rr) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nmodes; i++) {
		for (j = 0; j < nregs; j++) {
			/* One value per (mode, register) pair. */
			u8 *val = rr->values + (i * nregs) + j;

			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      false, val);
			if (!ret)
				continue;

			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      true, val);
			if (ret)
				goto out;
		}
	}

	rr->nregs = nregs;
	rr->regs = hynix_1xnm_mlc_read_retry_regs;
	hynix->read_retry = rr;
	chip->setup_read_retry = hynix_nand_setup_read_retry;
	chip->read_retries = nmodes;

out:
	kfree(buf);

	if (ret)
		kfree(rr);

	return ret;
}
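
/*
 * Register writes needed to expose the read-retry OTP area, and the two
 * candidate OTP locations that hynix_nand_rr_init() probes in turn (the
 * first one that yields a consistent parameter set is used).
 */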
static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x21f,
		.size = 784
	},
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x200,
		.size = 528,
	},
};
static int hynix_nand_rr_init(struct nand_chip *chip)
{
	int i, ret = 0;
	bool valid_jedecid;

	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	/*
	 * We only support read-retry for 1xnm NANDs, and those NANDs all
	 * expose a valid JEDEC ID.
	 */
	if (valid_jedecid) {
		u8 nand_tech = chip->id.data[5] >> 4;

		/* 1xnm technology */
		if (nand_tech == 4) {
			for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
			     i++) {
				/*
				 * FIXME: Hynix recommends copying the
				 * read-retry OTP area into a normal page.
				 */
				ret = hynix_mlc_1xnm_rr_init(chip,
						hynix_mlc_1xnm_rr_otps + i);
				if (!ret)
					break;
			}
		}
	}

	if (ret)
		pr_warn("failed to initialize read-retry infrastructure\n");

	return 0;
}
static void hynix_nand_extract_oobsize(struct nand_chip *chip,
				       bool valid_jedecid)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 oobsize;

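	/*
	 * The OOB size code is scattered in ID byte 3: bits 2-3 provide the
	 * two low bits of the code, and bit 6 is used as bit 2.
	 */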
	oobsize = ((chip->id.data[3] >> 2) & 0x3) |
		  ((chip->id.data[3] >> 4) & 0x4);

	if (valid_jedecid) {
		switch (oobsize) {
		case 0:
			mtd->oobsize = 2048;
			break;
		case 1:
			mtd->oobsize = 1664;
			break;
		case 2:
			mtd->oobsize = 1024;
			break;
		case 3:
			mtd->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}
	} else {
		switch (oobsize) {
		case 0:
			mtd->oobsize = 128;
			break;
		case 1:
			mtd->oobsize = 224;
			break;
		case 2:
			mtd->oobsize = 448;
			break;
		case 3:
			mtd->oobsize = 64;
			break;
		case 4:
			mtd->oobsize = 32;
			break;
		case 5:
			mtd->oobsize = 16;
			break;
		case 6:
			mtd->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}

		/*
		 * The datasheet of H27UCG8T2BTR mentions that the "Redundant
		 * Area Size" is encoded "per 8KB" (page size). This chip uses
		 * a page size of 16KiB. The datasheet mentions an OOB size of
		 * 1,280 bytes, but the OOB size encoded in the ID bytes
		 * (using the existing logic above) is 640 bytes.
		 * Update the OOB size for this chip by taking the value
		 * determined above and scaling it to the actual page size (so
		 * the actual OOB size for this chip is: 640 * 16k / 8k).
		 */
		if (chip->id.data[1] == 0xde)
			mtd->oobsize *= mtd->writesize / SZ_8K;
	}
}
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
						bool valid_jedecid)
{
	u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

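	/*
	 * The tables below map this 3-bit code to the (ECC step size, ECC
	 * strength) pair required by the chip, e.g. level 2 on a chip with a
	 * valid JEDEC ID means 24 bits of correction per 1024-byte step.
	 */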
	if (valid_jedecid) {
		/* Reference: H27UCG8T2E datasheet */
		chip->ecc_step_ds = 1024;

		switch (ecc_level) {
		case 0:
			chip->ecc_step_ds = 0;
			chip->ecc_strength_ds = 0;
			break;
		case 1:
			chip->ecc_strength_ds = 4;
			break;
		case 2:
			chip->ecc_strength_ds = 24;
			break;
		case 3:
			chip->ecc_strength_ds = 32;
			break;
		case 4:
			chip->ecc_strength_ds = 40;
			break;
		case 5:
			chip->ecc_strength_ds = 50;
			break;
		case 6:
			chip->ecc_strength_ds = 60;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid ECC requirements");
		}
	} else {
		/*
		 * The meaning of the ECC requirements field depends on the
		 * NAND technology.
		 */
		u8 nand_tech = chip->id.data[5] & 0x7;

		if (nand_tech < 3) {
			/* > 26nm, reference: H27UBG8T2A datasheet */
			if (ecc_level < 5) {
				chip->ecc_step_ds = 512;
				chip->ecc_strength_ds = 1 << ecc_level;
			} else if (ecc_level < 7) {
				if (ecc_level == 5)
					chip->ecc_step_ds = 2048;
				else
					chip->ecc_step_ds = 1024;
				chip->ecc_strength_ds = 24;
			} else {
				/*
				 * We should never reach this case, but if that
				 * happens, this probably means Hynix decided
				 * to use a different extended ID format, and
				 * we should find a way to support it.
				 */
				WARN(1, "Invalid ECC requirements");
			}
		} else {
			/* <= 26nm, reference: H27UBG8T2B datasheet */
			if (!ecc_level) {
				chip->ecc_step_ds = 0;
				chip->ecc_strength_ds = 0;
			} else if (ecc_level < 5) {
				chip->ecc_step_ds = 512;
				chip->ecc_strength_ds = 1 << (ecc_level - 1);
			} else {
				chip->ecc_step_ds = 1024;
				chip->ecc_strength_ds = 24 +
							(8 * (ecc_level - 5));
			}
		}
	}
}
static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
						       bool valid_jedecid)
{
	u8 nand_tech;

	/* We need scrambling on all TLC NANDs */
	if (chip->bits_per_cell > 2)
		chip->options |= NAND_NEED_SCRAMBLING;

	/* And on MLC NANDs with sub-3xnm process */
	if (valid_jedecid) {
		nand_tech = chip->id.data[5] >> 4;

		/* < 3xnm */
		if (nand_tech > 0)
			chip->options |= NAND_NEED_SCRAMBLING;
	} else {
		nand_tech = chip->id.data[5] & 0x7;

		/* < 32nm */
		if (nand_tech > 2)
			chip->options |= NAND_NEED_SCRAMBLING;
	}
}
static void hynix_nand_decode_id(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool valid_jedecid;
	u8 tmp;

	/*
	 * Exclude all SLC NANDs from this advanced detection scheme.
	 * According to the ranges defined in several datasheets, it might
	 * appear that even SLC NANDs could fall in this extended ID scheme.
	 * If that is the case, rework the test to let SLC NANDs go through
	 * the detection process.
	 */
	if (chip->id.len < 6 || nand_is_slc(chip)) {
		nand_decode_ext_id(chip);
		return;
	}

	/* Extract pagesize */
	mtd->writesize = 2048 << (chip->id.data[3] & 0x03);

	tmp = (chip->id.data[3] >> 4) & 0x3;
	/*
	 * When bit7 is set that means we start counting at 1MiB, otherwise
	 * we start counting at 128KiB and shift this value by the content of
	 * ID[3][4:5].
	 * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
	 * this case the erasesize is set to 768KiB.
	 */
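	/*
	 * For instance, ID[3] = 0x91 has bit7 set and ID[3][4:5] = 1, which
	 * gives an erasesize of 1MiB << 1 = 2MiB.
	 */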
	if (chip->id.data[3] & 0x80)
		mtd->erasesize = SZ_1M << tmp;
	else if (tmp == 3)
		mtd->erasesize = SZ_512K + SZ_256K;
	else
		mtd->erasesize = SZ_128K << tmp;

	/*
	 * Modern Toggle DDR NANDs have a valid JEDEC ID even though they are
	 * not exposing a valid JEDEC parameter table.
	 * These NANDs use a different NAND ID scheme.
	 */
	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	hynix_nand_extract_oobsize(chip, valid_jedecid);
	hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
	hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}
static void hynix_nand_cleanup(struct nand_chip *chip)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);

	if (!hynix)
		return;

	kfree(hynix->read_retry);
	kfree(hynix);
	nand_set_manufacturer_data(chip, NULL);
}

static int hynix_nand_init(struct nand_chip *chip)
{
	struct hynix_nand *hynix;
	int ret;

	if (!nand_is_slc(chip))
		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
	else
		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;

	hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
	if (!hynix)
		return -ENOMEM;

	nand_set_manufacturer_data(chip, hynix);

	ret = hynix_nand_rr_init(chip);
	if (ret)
		hynix_nand_cleanup(chip);

	return ret;
}

const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
	.detect = hynix_nand_decode_id,
	.init = hynix_nand_init,
	.cleanup = hynix_nand_cleanup,
};