/*
 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
 *
 * Copyright © 2006 Texas Instruments.
 *
 * Port to 2.6.23 Copyright © 2008 by:
 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
 *   Troy Kisky <troy.kisky@boundarydevices.com>
 *   Dirk Behme <Dirk.Behme@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_mtd.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>

/*
 * This is a device driver for the NAND flash controller found on the
 * various DaVinci family chips.  It handles up to four SoC chipselects,
 * and some flavors of secondary chipselect (e.g. based on A12) as used
 * with multichip packages.
 *
 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
 * available on chips like the DM355 and OMAP-L137 and needed with the
 * more error-prone MLC NAND chips.
 *
 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
 * outputs in a "wire-AND" configuration, with no per-chip signals.
 */
struct davinci_nand_info {
	struct nand_chip	chip;

	struct device		*dev;
	struct clk		*clk;

	bool			is_readmode;

	void __iomem		*base;
	void __iomem		*vaddr;

	uint32_t		ioaddr;
	uint32_t		current_cs;

	uint32_t		mask_chipsel;
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	uint32_t		core_chipsel;

	struct davinci_aemif_timing	*timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
		int offset)
{
	return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
		int offset, unsigned long value)
{
	__raw_writel(value, info->base + offset);
}

/*----------------------------------------------------------------------*/
/*
 * Access to hardware control lines: ALE, CLE, secondary chipselect.
 */
static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
				   unsigned int ctrl)
{
	struct davinci_nand_info	*info = to_davinci_nand(mtd);
	uint32_t			addr = info->current_cs;
	struct nand_chip		*nand = mtd_to_nand(mtd);

	/* Did the control lines change? */
	if (ctrl & NAND_CTRL_CHANGE) {
		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
			addr |= info->mask_cle;
		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
			addr |= info->mask_ale;

		nand->IO_ADDR_W = (void __iomem __force *)addr;
	}

	if (cmd != NAND_CMD_NONE)
		iowrite8(cmd, nand->IO_ADDR_W);
}
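
/*
 * A note on chip selection: when a secondary chipselect is in use, it is
 * just another address bit (e.g. A12 on some multichip packages, as in
 * the overview above), so "selecting" chip 1 only means ORing
 * mask_chipsel into the base address used for every later read or write.
 */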
static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
	struct davinci_nand_info	*info = to_davinci_nand(mtd);
	uint32_t			addr = info->ioaddr;

	/* maybe kick in a second chipselect */
	if (chip > 0)
		addr |= info->mask_chipsel;
	info->current_cs = addr;

	info->chip.IO_ADDR_W = (void __iomem __force *)addr;
	info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}

/*----------------------------------------------------------------------*/
/*
 * 1-bit hardware ECC ... context maintained for each core chipselect
 */

static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDF1ECC_OFFSET
			+ 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(mtd);

	/* Reset ECC hardware */
	nand_davinci_readecc_1bit(mtd);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
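/*
 * The per-chipselect ECC register holds two 12-bit parity halves, in bits
 * [11:0] and [27:16]; the packing below squeezes them into 24 contiguous
 * bits.  The result is inverted so that the ECC stored for an erased
 * (all-0xff) page also reads back as all 0xff and still checks out.
 */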
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
				       const u_char *dat, u_char *ecc_code)
{
	unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

	/* invert so that erased block ecc is correct */
	ecc24 = ~ecc24;
	ecc_code[0] = (u_char)(ecc24);
	ecc_code[1] = (u_char)(ecc24 >> 8);
	ecc_code[2] = (u_char)(ecc24 >> 16);

	return 0;
}
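
/*
 * How the 1-bit correction below classifies @diff (the XOR of the stored
 * and freshly computed 24-bit codes): if the two 12-bit halves are exact
 * bitwise complements, a single data bit flipped and the upper half gives
 * its location (byte offset in bits [23:15], bit-in-byte in bits [14:12]);
 * if @diff has exactly one bit set, the error is in the stored ECC bytes
 * themselves and the data needs no fix; anything else is uncorrectable.
 */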
static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
					  (read_ecc[2] << 16);
	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
					  (calc_ecc[2] << 16);
	uint32_t diff = eccCalc ^ eccNand;

	if (diff) {
		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
			/* Correctable error */
			if ((diff >> (12 + 3)) < chip->ecc.size) {
				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
				return 1;
			} else {
				return -EBADMSG;
			}
		} else if (!(diff & (diff - 1))) {
			/* Single bit ECC error in the ECC itself,
			 * nothing to fix */
			return 1;
		} else {
			/* Uncorrectable error */
			return -EBADMSG;
		}
	}

	return 0;
}
/*----------------------------------------------------------------------*/

/*
 * 4-bit hardware ECC ... context maintained over entire AEMIF
 *
 * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
 * since that forces use of a problematic "infix OOB" layout.
 * Among other things, it trashes manufacturer bad block markers.
 * Also, and specific to this hardware, it ECC-protects the "prepad"
 * in the OOB ... while having ECC protection for parts of OOB would
 * seem useful, the current MTD stack sometimes wants to update the
 * OOB without recomputing ECC.
 */
static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Start 4-bit ECC calculation for read/write */
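	/* The read-modify-write below steers the shared 4-bit engine to this
	 * chipselect (NANDFCR bits 5:4) and starts a new calculation by
	 * setting bit 12.
	 */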
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val &= ~(0x03 << 4);
	val |= (info->core_chipsel << 4) | BIT(12);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	info->is_readmode = (mode == NAND_ECC_READ);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
	const u32 mask = 0x03ff03ff;

	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}
/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
		const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, terminate ECC calculation by a dummy read
	 * of some 4-bit ECC register.  ECC covers everything that
	 * was read; correct() just uses the hardware state, so
	 * ecc_code is not needed.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ecc values into ten bytes, making
	 * two passes which each convert four values (in upper and
	 * lower halves of two 32-bit words) into five bytes.  The
	 * ROM boot loader uses this same packing scheme.
	 */
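	/* Per pass, the five output bytes carry the four 10-bit values
	 * back to back, LSB first:
	 *
	 *   byte 0:  value0[7:0]
	 *   byte 1:  value1[5:0] value0[9:8]
	 *   byte 2:  value2[3:0] value1[9:6]
	 *   byte 3:  value3[1:0] value2[9:4]
	 *   byte 4:  value3[9:2]
	 *
	 * where value0/value1 sit in the low/high halves of p[0] and
	 * value2/value3 in the low/high halves of p[1].
	 */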
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ =   p[0]        & 0xff;
		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ =  (p[1] >> 18) & 0xff;
	}

	return 0;
}
/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
		u_char *data, u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* Unpack ten bytes into eight 10 bit values.  We know we're
	 * little-endian, and use type punning for less shifting/masking.
	 */
	if (WARN_ON(0x01 & (unsigned) ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f)  | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)   | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)          | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)   | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;
	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data while this is working,
	 * to speed up the overall page read.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/*
	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
	 * begin trying to poll for the state, you may fall right out of your
	 * loop without any of the correction calculations having taken place.
	 * The recommendation from the hardware team is to initially delay as
	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
	 * correction state.
	 */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	for (;;) {
		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EBADMSG;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}
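
	/* The error address registers report positions counted back from
	 * the end of the codeword; the (512 + 7) - address conversion below
	 * maps that onto an offset into @data, and anything that lands at or
	 * beyond the 512 data bytes lies in the ECC bytes themselves and
	 * needs no correction in the data buffer.
	 */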
correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		if (i > 1) {
			error_address = davinci_nand_readl(info,
					NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
					NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
					NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
					NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		error_address &= 0x3ff;
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}

/*----------------------------------------------------------------------*/
/*
 * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
 * how these chips are normally wired.  This translates to both 8 and 16
 * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
 *
 * For now we assume that configuration, or any other one which ignores
 * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
 * and have that transparently morphed into multiple NAND operations.
 */
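/*
 * Concretely, with the masks that wiring implies (mask_ale = BIT(3) =
 * 0x08, mask_cle = BIT(4) = 0x10), a command byte goes to the current
 * chipselect base ORed with 0x10, an address byte to the base ORed with
 * 0x08, and data to the base itself; because the two LSBs are ignored,
 * the wide accesses below all hit the same data latch.
 */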
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
	else
		ioread8_rep(chip->IO_ADDR_R, buf, len);
}

static void nand_davinci_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
	else
		iowrite8_rep(chip->IO_ADDR_R, buf, len);
}

/*
 * Check hardware register for wait status. Returns 1 if device is ready,
 * 0 if it is still busy.
 */
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}
/*----------------------------------------------------------------------*/

/* An ECC layout for using 4-bit ECC with small-page flash, storing
 * ten ECC bytes plus the manufacturer's bad block marker byte, and
 * not overlapping the default BBT markers.
 */
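/* For a 16-byte small-page OOB, the sections below work out to: ECC in
 * bytes 0-4, the bad block marker in byte 5, ECC in bytes 6-7, free space
 * (enough for a flash BBT marker) in bytes 8-12, and the last three ECC
 * bytes in 13-15.
 */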
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	if (section > 2)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else if (section == 1) {
		oobregion->offset = 6;
		oobregion->length = 2;
	} else {
		oobregion->offset = 13;
		oobregion->length = 3;
	}

	return 0;
}

static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 8;
		oobregion->length = 5;
	} else {
		oobregion->offset = 16;
		oobregion->length = mtd->oobsize - 16;
	}

	return 0;
}

static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
	.ecc = hwecc4_ooblayout_small_ecc,
	.free = hwecc4_ooblayout_small_free,
};
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{.compatible = "ti,keystone-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
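
/*
 * The parser below consumes a node along these lines (the node name and
 * property values are illustrative only; the two memory ranges -- NAND
 * data window and AEMIF control registers -- still arrive as platform
 * resources):
 *
 *	nand {
 *		compatible = "ti,davinci-nand";
 *		ti,davinci-chipselect = <1>;
 *		ti,davinci-mask-ale = <0>;
 *		ti,davinci-mask-cle = <0>;
 *		ti,davinci-mask-chipsel = <0>;
 *		nand-ecc-mode = "hw";
 *		ti,davinci-ecc-bits = <4>;
 *		nand-on-flash-bbt;
 *	};
 */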
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;

		pdata = devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return ERR_PTR(-ENOMEM);
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdev->id = prop;
		else
			return ERR_PTR(-EINVAL);
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;
		if (!of_property_read_string(pdev->dev.of_node,
			"nand-ecc-mode", &mode) ||
		    !of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->ecc_mode = NAND_ECC_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->ecc_mode = NAND_ECC_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->ecc_mode = NAND_ECC_HW;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;

		prop = of_get_nand_bus_width(pdev->dev.of_node);
		if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop))
			if (prop == 16)
				pdata->options |= NAND_BUSWIDTH_16;
		if (of_property_read_bool(pdev->dev.of_node,
			"nand-on-flash-bbt") ||
		    of_property_read_bool(pdev->dev.of_node,
			"ti,davinci-nand-use-bbt"))
			pdata->bbt_options = NAND_BBT_USE_FLASH;

		if (of_device_is_compatible(pdev->dev.of_node,
					    "ti,keystone-nand")) {
			pdata->options |= NAND_NO_SUBPAGE_WRITE;
		}
	}

	return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}
#endif
static int nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata	*pdata;
	struct davinci_nand_info	*info;
	struct resource			*res1;
	struct resource			*res2;
	void __iomem			*vaddr;
	void __iomem			*base;
	int				ret;
	uint32_t			val;
	nand_ecc_modes_t		ecc_mode;
	struct mtd_info			*mtd;

	pdata = nand_davinci_get_pdata(pdev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* insist on board-specific configuration */
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdev->id < 0 || pdev->id > 3)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		return -EINVAL;
	}

	vaddr = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	/*
	 * This register range is used for NAND configuration. When the TI
	 * AEMIF driver is in use, the same memory address range has already
	 * been requested by AEMIF, so we cannot request it twice; just
	 * ioremap it. The AEMIF and NAND drivers do not use the same
	 * registers within this range.
	 */
	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
		return -EADDRNOTAVAIL;
	}

	info->dev		= &pdev->dev;
	info->base		= base;
	info->vaddr		= vaddr;

	mtd			= nand_to_mtd(&info->chip);
	mtd->dev.parent		= &pdev->dev;
	nand_set_flash_node(&info->chip, pdev->dev.of_node);

	info->chip.IO_ADDR_R	= vaddr;
	info->chip.IO_ADDR_W	= vaddr;
	info->chip.chip_delay	= 0;
	info->chip.select_chip	= nand_davinci_select_chip;

	/* options such as NAND_BBT_USE_FLASH */
	info->chip.bbt_options	= pdata->bbt_options;
	/* options such as 16-bit widths */
	info->chip.options	= pdata->options;
	info->chip.bbt_td	= pdata->bbt_td;
	info->chip.bbt_md	= pdata->bbt_md;
	info->timing		= pdata->timing;

	info->ioaddr		= (uint32_t __force) vaddr;

	info->current_cs	= info->ioaddr;
	info->core_chipsel	= pdev->id;
	info->mask_chipsel	= pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;

	/* Set address of hardware control function */
	info->chip.cmd_ctrl	= nand_davinci_hwcontrol;
	info->chip.dev_ready	= nand_davinci_dev_ready;

	/* Speed up buffer I/O */
	info->chip.read_buf	= nand_davinci_read_buf;
	info->chip.write_buf	= nand_davinci_write_buf;

	/* Use board-specific ECC config */
	ecc_mode		= pdata->ecc_mode;

	ret = -EINVAL;

	switch (ecc_mode) {
	case NAND_ECC_NONE:
	case NAND_ECC_SOFT:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_HW:
		if (pdata->ecc_bits == 4) {
			/* No sanity checks:  CPUs must support this,
			 * and the chips may not use NAND_BUSWIDTH_16.
			 */

			/* No sharing 4-bit hardware between chipselects yet */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			if (ret == -EBUSY)
				return ret;

			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
			info->chip.ecc.correct = nand_davinci_correct_4bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
			info->chip.ecc.bytes = 10;
			info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
		} else {
			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
			info->chip.ecc.correct = nand_davinci_correct_1bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
			info->chip.ecc.bytes = 3;
		}
		info->chip.ecc.size = 512;
		info->chip.ecc.strength = pdata->ecc_bits;
		break;
	default:
		return -EINVAL;
	}
	info->chip.ecc.mode = ecc_mode;

	info->clk = devm_clk_get(&pdev->dev, "aemif");
	if (IS_ERR(info->clk)) {
		ret = PTR_ERR(info->clk);
		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(info->clk);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
			ret);
		goto err_clk_enable;
	}

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
	ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		goto err;
	}

	/* Update ECC layout if needed ... for 1-bit HW ECC, the default
	 * is OK, but it allocates 6 bytes when only 3 are needed (for
	 * each 512 bytes).  For the 4-bit HW ECC, that default is not
	 * usable:  10 bytes are needed, not 6.
	 */
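	/* For reference: a 2048-byte page is four 512-byte ECC chunks and
	 * needs 4 * 10 = 40 ECC bytes; only the small-page (1 chunk) and
	 * 2 KiB / 4 KiB (4 or 8 chunk) geometries are handled below.
	 */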
	if (pdata->ecc_bits == 4) {
		int	chunks = mtd->writesize / 512;

		if (!chunks || mtd->oobsize < 16) {
			dev_dbg(&pdev->dev, "too small\n");
			ret = -EINVAL;
			goto err;
		}

		/* For small page chips, preserve the manufacturer's
		 * badblock marking data ... and make sure a flash BBT
		 * table marker fits in the free bytes.
		 */
		if (chunks == 1) {
			mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
		} else if (chunks == 4 || chunks == 8) {
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
		} else {
			ret = -EIO;
			goto err;
		}
	}

	ret = nand_scan_tail(mtd);
	if (ret < 0)
		goto err;

	if (pdata->parts)
		ret = mtd_device_parse_register(mtd, NULL, NULL,
					pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret < 0)
		goto err;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
		 (val >> 8) & 0xff, val & 0xff);

	return 0;

err:
	clk_disable_unprepare(info->clk);

err_clk_enable:
	spin_lock_irq(&davinci_nand_lock);
	if (ecc_mode == NAND_ECC_HW_SYNDROME)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);
	return ret;
}
static int nand_davinci_remove(struct platform_device *pdev)
{
	struct davinci_nand_info *info = platform_get_drvdata(pdev);

	spin_lock_irq(&davinci_nand_lock);
	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

	nand_release(nand_to_mtd(&info->chip));

	clk_disable_unprepare(info->clk);

	return 0;
}

static struct platform_driver nand_davinci_driver = {
	.probe		= nand_davinci_probe,
	.remove		= nand_davinci_remove,
	.driver		= {
		.name	= "davinci_nand",
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");