tegra_nand.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
 * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
 * Copyright (C) 2012 Avionic Design GmbH
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#define COMMAND 0x00
#define COMMAND_GO BIT(31)
#define COMMAND_CLE BIT(30)
#define COMMAND_ALE BIT(29)
#define COMMAND_PIO BIT(28)
#define COMMAND_TX BIT(27)
#define COMMAND_RX BIT(26)
#define COMMAND_SEC_CMD BIT(25)
#define COMMAND_AFT_DAT BIT(24)
#define COMMAND_TRANS_SIZE(size) ((((size) - 1) & 0xf) << 20)
#define COMMAND_A_VALID BIT(19)
#define COMMAND_B_VALID BIT(18)
#define COMMAND_RD_STATUS_CHK BIT(17)
#define COMMAND_RBSY_CHK BIT(16)
#define COMMAND_CE(x) BIT(8 + ((x) & 0x7))
#define COMMAND_CLE_SIZE(size) ((((size) - 1) & 0x3) << 4)
#define COMMAND_ALE_SIZE(size) ((((size) - 1) & 0xf) << 0)

#define STATUS 0x04

#define ISR 0x08
#define ISR_CORRFAIL_ERR BIT(24)
#define ISR_UND BIT(7)
#define ISR_OVR BIT(6)
#define ISR_CMD_DONE BIT(5)
#define ISR_ECC_ERR BIT(4)

#define IER 0x0c
#define IER_ERR_TRIG_VAL(x) (((x) & 0xf) << 16)
#define IER_UND BIT(7)
#define IER_OVR BIT(6)
#define IER_CMD_DONE BIT(5)
#define IER_ECC_ERR BIT(4)
#define IER_GIE BIT(0)

#define CONFIG 0x10
#define CONFIG_HW_ECC BIT(31)
#define CONFIG_ECC_SEL BIT(30)
#define CONFIG_ERR_COR BIT(29)
#define CONFIG_PIPE_EN BIT(28)
#define CONFIG_TVAL_4 (0 << 24)
#define CONFIG_TVAL_6 (1 << 24)
#define CONFIG_TVAL_8 (2 << 24)
#define CONFIG_SKIP_SPARE BIT(23)
#define CONFIG_BUS_WIDTH_16 BIT(21)
#define CONFIG_COM_BSY BIT(20)
#define CONFIG_PS_256 (0 << 16)
#define CONFIG_PS_512 (1 << 16)
#define CONFIG_PS_1024 (2 << 16)
#define CONFIG_PS_2048 (3 << 16)
#define CONFIG_PS_4096 (4 << 16)
#define CONFIG_SKIP_SPARE_SIZE_4 (0 << 14)
#define CONFIG_SKIP_SPARE_SIZE_8 (1 << 14)
#define CONFIG_SKIP_SPARE_SIZE_12 (2 << 14)
#define CONFIG_SKIP_SPARE_SIZE_16 (3 << 14)
#define CONFIG_TAG_BYTE_SIZE(x) ((x) & 0xff)

#define TIMING_1 0x14
#define TIMING_TRP_RESP(x) (((x) & 0xf) << 28)
#define TIMING_TWB(x) (((x) & 0xf) << 24)
#define TIMING_TCR_TAR_TRR(x) (((x) & 0xf) << 20)
#define TIMING_TWHR(x) (((x) & 0xf) << 16)
#define TIMING_TCS(x) (((x) & 0x3) << 14)
#define TIMING_TWH(x) (((x) & 0x3) << 12)
#define TIMING_TWP(x) (((x) & 0xf) << 8)
#define TIMING_TRH(x) (((x) & 0x3) << 4)
#define TIMING_TRP(x) (((x) & 0xf) << 0)

#define RESP 0x18

#define TIMING_2 0x1c
#define TIMING_TADL(x) ((x) & 0xf)

#define CMD_REG1 0x20
#define CMD_REG2 0x24
#define ADDR_REG1 0x28
#define ADDR_REG2 0x2c

#define DMA_MST_CTRL 0x30
#define DMA_MST_CTRL_GO BIT(31)
#define DMA_MST_CTRL_IN (0 << 30)
#define DMA_MST_CTRL_OUT BIT(30)
#define DMA_MST_CTRL_PERF_EN BIT(29)
#define DMA_MST_CTRL_IE_DONE BIT(28)
#define DMA_MST_CTRL_REUSE BIT(27)
#define DMA_MST_CTRL_BURST_1 (2 << 24)
#define DMA_MST_CTRL_BURST_4 (3 << 24)
#define DMA_MST_CTRL_BURST_8 (4 << 24)
#define DMA_MST_CTRL_BURST_16 (5 << 24)
#define DMA_MST_CTRL_IS_DONE BIT(20)
#define DMA_MST_CTRL_EN_A BIT(2)
#define DMA_MST_CTRL_EN_B BIT(1)

#define DMA_CFG_A 0x34
#define DMA_CFG_B 0x38

#define FIFO_CTRL 0x3c
#define FIFO_CTRL_CLR_ALL BIT(3)

#define DATA_PTR 0x40
#define TAG_PTR 0x44
#define ECC_PTR 0x48

#define DEC_STATUS 0x4c
#define DEC_STATUS_A_ECC_FAIL BIT(1)
#define DEC_STATUS_ERR_COUNT_MASK 0x00ff0000
#define DEC_STATUS_ERR_COUNT_SHIFT 16

#define HWSTATUS_CMD 0x50
#define HWSTATUS_MASK 0x54
#define HWSTATUS_RDSTATUS_MASK(x) (((x) & 0xff) << 24)
#define HWSTATUS_RDSTATUS_VALUE(x) (((x) & 0xff) << 16)
#define HWSTATUS_RBSY_MASK(x) (((x) & 0xff) << 8)
#define HWSTATUS_RBSY_VALUE(x) (((x) & 0xff) << 0)

#define BCH_CONFIG 0xcc
#define BCH_ENABLE BIT(0)
#define BCH_TVAL_4 (0 << 4)
#define BCH_TVAL_8 (1 << 4)
#define BCH_TVAL_14 (2 << 4)
#define BCH_TVAL_16 (3 << 4)

#define DEC_STAT_RESULT 0xd0
#define DEC_STAT_BUF 0xd4
#define DEC_STAT_BUF_FAIL_SEC_FLAG_MASK 0xff000000
#define DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT 24
#define DEC_STAT_BUF_CORR_SEC_FLAG_MASK 0x00ff0000
#define DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT 16
#define DEC_STAT_BUF_MAX_CORR_CNT_MASK 0x00001f00
#define DEC_STAT_BUF_MAX_CORR_CNT_SHIFT 8

#define OFFSET(val, off) ((val) < (off) ? 0 : (val) - (off))

#define SKIP_SPARE_BYTES 4
#define BITS_PER_STEP_RS 18
#define BITS_PER_STEP_BCH 13

#define INT_MASK (IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
#define HWSTATUS_CMD_DEFAULT NAND_STATUS_READY
#define HWSTATUS_MASK_DEFAULT (HWSTATUS_RDSTATUS_MASK(1) | \
                               HWSTATUS_RDSTATUS_VALUE(0) | \
                               HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
                               HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))

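/*
 * One controller instance drives the whole NAND block: command_complete
 * and dma_complete are signalled from the interrupt handler below, and
 * last_read_error latches ISR_CORRFAIL_ERR so the HW ECC read path can
 * tell whether the decoder reported anything for the last transfer.
 */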
struct tegra_nand_controller {
        struct nand_controller controller;
        struct device *dev;
        void __iomem *regs;
        int irq;
        struct clk *clk;
        struct completion command_complete;
        struct completion dma_complete;
        bool last_read_error;
        int cur_cs;
        struct nand_chip *chip;
};

struct tegra_nand_chip {
        struct nand_chip chip;
        struct gpio_desc *wp_gpio;
        struct mtd_oob_region ecc;
        u32 config;
        u32 config_ecc;
        u32 bch_config;
        int cs[1];
};

static inline struct tegra_nand_controller *
                        to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
        return container_of(hw_ctrl, struct tegra_nand_controller, controller);
}

static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
        return container_of(chip, struct tegra_nand_chip, chip);
}

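/*
 * The controller stores ECC in OOB right after the 4 skipped spare bytes.
 * Bytes per 512-byte step follow from the codeword geometry: 18 bits per
 * bit of RS strength and 13 bits per bit of BCH strength. For example
 * (assuming a 2048-byte page, i.e. 4 steps, with RS strength 4):
 * 18 * 4 = 72 bits = 9 bytes per step, so 36 bytes per page, rounded up
 * to a multiple of 4 bytes.
 */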
static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
                                       struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
                                          BITS_PER_BYTE);

        if (section > 0)
                return -ERANGE;

        oobregion->offset = SKIP_SPARE_BYTES;
        oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

        return 0;
}

static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
                                        struct mtd_oob_region *oobregion)
{
        return -ERANGE;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
        .ecc = tegra_nand_ooblayout_rs_ecc,
        .free = tegra_nand_ooblayout_no_free,
};

static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
                                        struct mtd_oob_region *oobregion)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
                                          BITS_PER_BYTE);

        if (section > 0)
                return -ERANGE;

        oobregion->offset = SKIP_SPARE_BYTES;
        oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

        return 0;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
        .ecc = tegra_nand_ooblayout_bch_ecc,
        .free = tegra_nand_ooblayout_no_free,
};

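/*
 * A single interrupt line reports both command and DMA completion: the
 * handler samples ISR and DMA_MST_CTRL to decide which completion(s) to
 * signal, and acknowledges the status bits by writing them back.
 */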
static irqreturn_t tegra_nand_irq(int irq, void *data)
{
        struct tegra_nand_controller *ctrl = data;
        u32 isr, dma;

        isr = readl_relaxed(ctrl->regs + ISR);
        dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
        dev_dbg(ctrl->dev, "isr %08x\n", isr);

        if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
                return IRQ_NONE;

        /*
         * The bit name is somewhat misleading: this is also set when
         * HW ECC was successful. The data sheet states:
         * Correctable OR Un-correctable errors occurred in the DMA transfer...
         */
        if (isr & ISR_CORRFAIL_ERR)
                ctrl->last_read_error = true;

        if (isr & ISR_CMD_DONE)
                complete(&ctrl->command_complete);

        if (isr & ISR_UND)
                dev_err(ctrl->dev, "FIFO underrun\n");

        if (isr & ISR_OVR)
                dev_err(ctrl->dev, "FIFO overrun\n");

        /* handle DMA interrupts */
        if (dma & DMA_MST_CTRL_IS_DONE) {
                writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
                complete(&ctrl->dma_complete);
        }

        /* clear interrupts */
        writel_relaxed(isr, ctrl->regs + ISR);

        return IRQ_HANDLED;
}

static const char * const tegra_nand_reg_names[] = {
        "COMMAND",
        "STATUS",
        "ISR",
        "IER",
        "CONFIG",
        "TIMING",
        NULL,
        "TIMING2",
        "CMD_REG1",
        "CMD_REG2",
        "ADDR_REG1",
        "ADDR_REG2",
        "DMA_MST_CTRL",
        "DMA_CFG_A",
        "DMA_CFG_B",
        "FIFO_CTRL",
};

static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
{
        u32 reg;
        int i;

        dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
        for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
                const char *reg_name = tegra_nand_reg_names[i];

                if (!reg_name)
                        continue;

                reg = readl_relaxed(ctrl->regs + (i * 4));
                dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
        }
}

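/*
 * Called on command/DMA timeout: stop the COMMAND and DMA engines, ack any
 * pending status bits and rearm both completions, with the IRQ masked so a
 * stale interrupt cannot complete the next operation prematurely.
 */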
static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
        u32 isr, dma;

        disable_irq(ctrl->irq);

        /* Abort current command/DMA operation */
        writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
        writel_relaxed(0, ctrl->regs + COMMAND);

        /* clear interrupts */
        isr = readl_relaxed(ctrl->regs + ISR);
        writel_relaxed(isr, ctrl->regs + ISR);
        dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
        writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);

        reinit_completion(&ctrl->command_complete);
        reinit_completion(&ctrl->dma_complete);

        enable_irq(ctrl->irq);
}

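/*
 * PIO execution of a parsed sub-operation: opcodes go to CMD_REG1/2, up to
 * eight address cycles to ADDR_REG1/2, and data is exchanged through the
 * 4-byte RESP register, which is what limits the data patterns in the op
 * parser below to 4 bytes.
 */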
static int tegra_nand_cmd(struct nand_chip *chip,
                          const struct nand_subop *subop)
{
        const struct nand_op_instr *instr;
        const struct nand_op_instr *instr_data_in = NULL;
        struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
        unsigned int op_id, size = 0, offset = 0;
        bool first_cmd = true;
        u32 reg, cmd = 0;
        int ret;

        for (op_id = 0; op_id < subop->ninstrs; op_id++) {
                unsigned int naddrs, i;
                const u8 *addrs;
                u32 addr1 = 0, addr2 = 0;

                instr = &subop->instrs[op_id];

                switch (instr->type) {
                case NAND_OP_CMD_INSTR:
                        if (first_cmd) {
                                cmd |= COMMAND_CLE;
                                writel_relaxed(instr->ctx.cmd.opcode,
                                               ctrl->regs + CMD_REG1);
                        } else {
                                cmd |= COMMAND_SEC_CMD;
                                writel_relaxed(instr->ctx.cmd.opcode,
                                               ctrl->regs + CMD_REG2);
                        }
                        first_cmd = false;
                        break;

                case NAND_OP_ADDR_INSTR:
                        offset = nand_subop_get_addr_start_off(subop, op_id);
                        naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
                        addrs = &instr->ctx.addr.addrs[offset];

                        cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
                        for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
                                addr1 |= *addrs++ << (BITS_PER_BYTE * i);
                        naddrs -= i;
                        for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
                                addr2 |= *addrs++ << (BITS_PER_BYTE * i);

                        writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
                        writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
                        break;

                case NAND_OP_DATA_IN_INSTR:
                        size = nand_subop_get_data_len(subop, op_id);
                        offset = nand_subop_get_data_start_off(subop, op_id);

                        cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
                               COMMAND_RX | COMMAND_A_VALID;

                        instr_data_in = instr;
                        break;

                case NAND_OP_DATA_OUT_INSTR:
                        size = nand_subop_get_data_len(subop, op_id);
                        offset = nand_subop_get_data_start_off(subop, op_id);

                        cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
                               COMMAND_TX | COMMAND_A_VALID;
                        memcpy(&reg, instr->ctx.data.buf.out + offset, size);
                        writel_relaxed(reg, ctrl->regs + RESP);
                        break;

                case NAND_OP_WAITRDY_INSTR:
                        cmd |= COMMAND_RBSY_CHK;
                        break;
                }
        }

        cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
        writel_relaxed(cmd, ctrl->regs + COMMAND);
        ret = wait_for_completion_timeout(&ctrl->command_complete,
                                          msecs_to_jiffies(500));
        if (!ret) {
                dev_err(ctrl->dev, "COMMAND timeout\n");
                tegra_nand_dump_reg(ctrl);
                tegra_nand_controller_abort(ctrl);
                return -ETIMEDOUT;
        }

        if (instr_data_in) {
                reg = readl_relaxed(ctrl->regs + RESP);
                memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
        }

        return 0;
}

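/*
 * Only three shapes are accepted here: a plain command sequence (with
 * optional addresses, second command and ready wait), a small data-out,
 * and a read-like sequence ending in a data-in phase. Anything else is
 * split up or rejected by the parser core.
 */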
static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
        NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
                NAND_OP_PARSER_PAT_CMD_ELEM(true),
                NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
                NAND_OP_PARSER_PAT_CMD_ELEM(true),
                NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
        NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
                NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
        NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
                NAND_OP_PARSER_PAT_CMD_ELEM(true),
                NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
                NAND_OP_PARSER_PAT_CMD_ELEM(true),
                NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
                NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
        );

static int tegra_nand_exec_op(struct nand_chip *chip,
                              const struct nand_operation *op,
                              bool check_only)
{
        return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
                                      check_only);
}

static void tegra_nand_select_chip(struct nand_chip *chip, int die_nr)
{
        struct tegra_nand_chip *nand = to_tegra_chip(chip);
        struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);

        WARN_ON(die_nr >= (int)ARRAY_SIZE(nand->cs));

        if (die_nr < 0 || die_nr > 0) {
                ctrl->cur_cs = -1;
                return;
        }

        ctrl->cur_cs = nand->cs[die_nr];
}

static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
                              struct nand_chip *chip, bool enable)
{
        struct tegra_nand_chip *nand = to_tegra_chip(chip);

        if (chip->ecc.algo == NAND_ECC_BCH && enable)
                writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
        else
                writel_relaxed(0, ctrl->regs + BCH_CONFIG);

        if (enable)
                writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
        else
                writel_relaxed(nand->config, ctrl->regs + CONFIG);
}

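/*
 * Common DMA page transfer used by both the raw and the HW ECC paths:
 * programs the READ0/READSTART or SEQIN/PAGEPROG command pair, the page
 * address, and up to two DMA buffers (A for data, B for OOB), then waits
 * for command and DMA completion with a 500 ms timeout each.
 */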
static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
                                void *buf, void *oob_buf, int oob_len, int page,
                                bool read)
{
        struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
        enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        dma_addr_t dma_addr = 0, dma_addr_oob = 0;
        u32 addr1, cmd, dma_ctrl;
        int ret;

        if (read) {
                writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
                writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
        } else {
                writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
                writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
        }
        cmd = COMMAND_CLE | COMMAND_SEC_CMD;

        /* Lower 16-bits are column, by default 0 */
        addr1 = page << 16;

        if (!buf)
                addr1 |= mtd->writesize;
        writel_relaxed(addr1, ctrl->regs + ADDR_REG1);

        if (chip->options & NAND_ROW_ADDR_3) {
                writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
                cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
        } else {
                cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
        }

        if (buf) {
                dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
                ret = dma_mapping_error(ctrl->dev, dma_addr);
                if (ret) {
                        dev_err(ctrl->dev, "dma mapping error\n");
                        return -EINVAL;
                }

                writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
                writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
        }

        if (oob_buf) {
                dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
                                              dir);
                ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
                if (ret) {
                        dev_err(ctrl->dev, "dma mapping error\n");
                        ret = -EINVAL;
                        goto err_unmap_dma_page;
                }

                writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
                writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
        }

        dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
                   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
                   DMA_MST_CTRL_BURST_16;

        if (buf)
                dma_ctrl |= DMA_MST_CTRL_EN_A;
        if (oob_buf)
                dma_ctrl |= DMA_MST_CTRL_EN_B;

        if (read)
                dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
        else
                dma_ctrl |= DMA_MST_CTRL_OUT;

        writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);

        cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
               COMMAND_CE(ctrl->cur_cs);

        if (buf)
                cmd |= COMMAND_A_VALID;
        if (oob_buf)
                cmd |= COMMAND_B_VALID;

        if (read)
                cmd |= COMMAND_RX;
        else
                cmd |= COMMAND_TX | COMMAND_AFT_DAT;

        writel_relaxed(cmd, ctrl->regs + COMMAND);

        ret = wait_for_completion_timeout(&ctrl->command_complete,
                                          msecs_to_jiffies(500));
        if (!ret) {
                dev_err(ctrl->dev, "COMMAND timeout\n");
                tegra_nand_dump_reg(ctrl);
                tegra_nand_controller_abort(ctrl);
                ret = -ETIMEDOUT;
                goto err_unmap_dma;
        }

        ret = wait_for_completion_timeout(&ctrl->dma_complete,
                                          msecs_to_jiffies(500));
        if (!ret) {
                dev_err(ctrl->dev, "DMA timeout\n");
                tegra_nand_dump_reg(ctrl);
                tegra_nand_controller_abort(ctrl);
                ret = -ETIMEDOUT;
                goto err_unmap_dma;
        }
        ret = 0;

err_unmap_dma:
        if (oob_buf)
                dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
        if (buf)
                dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);

        return ret;
}

static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
                                    int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        void *oob_buf = oob_required ? chip->oob_poi : NULL;

        return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
                                    mtd->oobsize, page, true);
}

static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
                                     int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        void *oob_buf = oob_required ? chip->oob_poi : NULL;

        return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
                                    mtd->oobsize, page, false);
}

static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
                                    mtd->oobsize, page, true);
}

static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
                                    mtd->oobsize, page, false);
}

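/*
 * HW ECC read: the interrupt handler only records *that* the decoder
 * reported something (last_read_error); DEC_STAT_BUF then says which
 * sectors failed or were corrected. Sectors flagged as failed may still
 * belong to an erased page, which is checked via
 * nand_check_erased_ecc_chunk() below.
 */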
static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
                                      int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
        struct tegra_nand_chip *nand = to_tegra_chip(chip);
        void *oob_buf = oob_required ? chip->oob_poi : NULL;
        u32 dec_stat, max_corr_cnt;
        unsigned long fail_sec_flag;
        int ret;

        tegra_nand_hw_ecc(ctrl, chip, true);
        ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
        tegra_nand_hw_ecc(ctrl, chip, false);
        if (ret)
                return ret;

        /* No correctable or un-correctable errors, page must have 0 bitflips */
        if (!ctrl->last_read_error)
                return 0;

        /*
         * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
         * which contains information for all ECC selections.
         *
         * Note that since we do not use Command Queues DEC_RESULT does not
         * state the number of pages we can read from the DEC_STAT_BUF. But
         * since CORRFAIL_ERR did occur during page read we do have a valid
         * result in DEC_STAT_BUF.
         */
        ctrl->last_read_error = false;
        dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);

        fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
                        DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;

        max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
                       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;

        if (fail_sec_flag) {
                int bit, max_bitflips = 0;

                /*
                 * Since we do not support subpage writes, a complete page
                 * is either written or not. We can take a shortcut here by
                 * checking whether any of the sectors has been read
                 * successfully. If at least one sector has been read
                 * successfully, the page must have been written previously.
                 * It cannot be an erased page.
                 *
                 * E.g. the controller might return fail_sec_flag with 0x4,
                 * which would mean only the third sector failed to correct.
                 * The page must have been written and the third sector is
                 * really not correctable anymore.
                 */
                if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
                        mtd->ecc_stats.failed += hweight8(fail_sec_flag);
                        return max_corr_cnt;
                }

                /*
                 * All sectors failed to correct, but the ECC isn't smart
                 * enough to figure out if a page is really just erased.
                 * Read OOB data and check whether data/OOB is completely
                 * erased or if error correction just failed for all sub-
                 * pages.
                 */
                ret = tegra_nand_read_oob(chip, page);
                if (ret < 0)
                        return ret;

                for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
                        u8 *data = buf + (chip->ecc.size * bit);
                        u8 *oob = chip->oob_poi + nand->ecc.offset +
                                  (chip->ecc.bytes * bit);

                        ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
                                                          oob, chip->ecc.bytes,
                                                          NULL, 0,
                                                          chip->ecc.strength);
                        if (ret < 0) {
                                mtd->ecc_stats.failed++;
                        } else {
                                mtd->ecc_stats.corrected += ret;
                                max_bitflips = max(ret, max_bitflips);
                        }
                }

                return max_t(unsigned int, max_corr_cnt, max_bitflips);
        } else {
                int corr_sec_flag;

                corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
                                DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;

                /*
                 * The value returned in the register is the maximum of
                 * bitflips encountered in any of the ECC regions. As there is
                 * no way to get the number of bitflips in a specific region,
                 * we are not able to deliver correct stats but instead
                 * overestimate the number of corrected bitflips by assuming
                 * that all regions where errors have been corrected
                 * encountered the maximum number of bitflips.
                 */
                mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);

                return max_corr_cnt;
        }
}

static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
                                       int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
        void *oob_buf = oob_required ? chip->oob_poi : NULL;
        int ret;

        tegra_nand_hw_ecc(ctrl, chip, true);
        ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
                                   0, page, false);
        tegra_nand_hw_ecc(ctrl, chip, false);

        return ret;
}

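/*
 * Worked example for the conversion below, assuming a 100 MHz controller
 * clock: rate = 100, period = DIV_ROUND_UP(1000000, 100) = 10000 ps, so a
 * tWB_max of 100 ns (100000 ps) becomes DIV_ROUND_UP(100000, 10000) = 10
 * clocks, programmed as 9 after the OFFSET(val, 1) adjustment (presumably
 * because the hardware adds one cycle of its own).
 */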
static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
                                    const struct nand_sdr_timings *timings)
{
        /*
         * The period (and all other timings in this function) is in ps,
         * so we need to take care here to avoid integer overflows.
         */
        unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
        unsigned int period = DIV_ROUND_UP(1000000, rate);
        u32 val, reg = 0;

        val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
                                timings->tRC_min), period);
        reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));

        val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
                               max(timings->tALS_min, timings->tALH_min)),
                           period);
        reg |= TIMING_TCS(OFFSET(val, 2));

        val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
                           period);
        reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));

        reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
        reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
        reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
        reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
        reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));

        writel_relaxed(reg, ctrl->regs + TIMING_1);

        val = DIV_ROUND_UP(timings->tADL_min, period);
        reg = TIMING_TADL(OFFSET(val, 3));

        writel_relaxed(reg, ctrl->regs + TIMING_2);
}

static int tegra_nand_setup_data_interface(struct nand_chip *chip, int csline,
                                           const struct nand_data_interface *conf)
{
        struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
        const struct nand_sdr_timings *timings;

        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
                return PTR_ERR(timings);

        if (csline == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;

        tegra_nand_setup_timing(ctrl, timings);

        return 0;
}

static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };

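/*
 * Pick the weakest strength from the table that satisfies the chip's
 * minimum, or the strongest one that still fits the OOB area when
 * NAND_ECC_MAXIMIZE is set (hence the reverse iteration).
 */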
static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
                                   int strength_len, int bits_per_step,
                                   int oobsize)
{
        bool maximize = chip->ecc.options & NAND_ECC_MAXIMIZE;
        int i;

        /*
         * Loop through available strengths; iterate backwards when trying
         * to maximize the BCH strength.
         */
        for (i = 0; i < strength_len; i++) {
                int strength_sel, bytes_per_step, bytes_per_page;

                if (maximize) {
                        strength_sel = strength[strength_len - i - 1];
                } else {
                        strength_sel = strength[i];

                        if (strength_sel < chip->ecc_strength_ds)
                                continue;
                }

                bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
                                              BITS_PER_BYTE);
                bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);

                /* Check whether strength fits OOB */
                if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
                        return strength_sel;
        }

        return -EINVAL;
}

static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
{
        const int *strength;
        int strength_len, bits_per_step;

        switch (chip->ecc.algo) {
        case NAND_ECC_RS:
                bits_per_step = BITS_PER_STEP_RS;
                if (chip->options & NAND_IS_BOOT_MEDIUM) {
                        strength = rs_strength_bootable;
                        strength_len = ARRAY_SIZE(rs_strength_bootable);
                } else {
                        strength = rs_strength;
                        strength_len = ARRAY_SIZE(rs_strength);
                }
                break;
        case NAND_ECC_BCH:
                bits_per_step = BITS_PER_STEP_BCH;
                if (chip->options & NAND_IS_BOOT_MEDIUM) {
                        strength = bch_strength_bootable;
                        strength_len = ARRAY_SIZE(bch_strength_bootable);
                } else {
                        strength = bch_strength;
                        strength_len = ARRAY_SIZE(bch_strength);
                }
                break;
        default:
                return -EINVAL;
        }

        return tegra_nand_get_strength(chip, strength, strength_len,
                                       bits_per_step, oobsize);
}

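/*
 * Finalize the chip configuration once the flash has been detected:
 * fixed 512-byte ECC steps, RS for small-page and BCH for large-page
 * devices by default, and the CONFIG/BCH_CONFIG values precomputed for
 * both the ECC and the non-ECC (raw) cases.
 */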
static int tegra_nand_attach_chip(struct nand_chip *chip)
{
        struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
        struct tegra_nand_chip *nand = to_tegra_chip(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);
        int bits_per_step;
        int ret;

        if (chip->bbt_options & NAND_BBT_USE_FLASH)
                chip->bbt_options |= NAND_BBT_NO_OOB;

        chip->ecc.mode = NAND_ECC_HW;
        chip->ecc.size = 512;
        chip->ecc.steps = mtd->writesize / chip->ecc.size;
        if (chip->ecc_step_ds != 512) {
                dev_err(ctrl->dev, "Unsupported step size %d\n",
                        chip->ecc_step_ds);
                return -EINVAL;
        }

        chip->ecc.read_page = tegra_nand_read_page_hwecc;
        chip->ecc.write_page = tegra_nand_write_page_hwecc;
        chip->ecc.read_page_raw = tegra_nand_read_page_raw;
        chip->ecc.write_page_raw = tegra_nand_write_page_raw;
        chip->ecc.read_oob = tegra_nand_read_oob;
        chip->ecc.write_oob = tegra_nand_write_oob;

        if (chip->options & NAND_BUSWIDTH_16)
                nand->config |= CONFIG_BUS_WIDTH_16;

        if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
                if (mtd->writesize < 2048)
                        chip->ecc.algo = NAND_ECC_RS;
                else
                        chip->ecc.algo = NAND_ECC_BCH;
        }

        if (chip->ecc.algo == NAND_ECC_BCH && mtd->writesize < 2048) {
                dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
                return -EINVAL;
        }

        if (!chip->ecc.strength) {
                ret = tegra_nand_select_strength(chip, mtd->oobsize);
                if (ret < 0) {
                        dev_err(ctrl->dev,
                                "No valid strength found, minimum %d\n",
                                chip->ecc_strength_ds);
                        return ret;
                }

                chip->ecc.strength = ret;
        }

        nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
                           CONFIG_SKIP_SPARE_SIZE_4;

        switch (chip->ecc.algo) {
        case NAND_ECC_RS:
                bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
                mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
                nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
                                    CONFIG_ERR_COR;
                switch (chip->ecc.strength) {
                case 4:
                        nand->config_ecc |= CONFIG_TVAL_4;
                        break;
                case 6:
                        nand->config_ecc |= CONFIG_TVAL_6;
                        break;
                case 8:
                        nand->config_ecc |= CONFIG_TVAL_8;
                        break;
                default:
                        dev_err(ctrl->dev, "ECC strength %d not supported\n",
                                chip->ecc.strength);
                        return -EINVAL;
                }
                break;
        case NAND_ECC_BCH:
                bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
                mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
                nand->bch_config = BCH_ENABLE;
                switch (chip->ecc.strength) {
                case 4:
                        nand->bch_config |= BCH_TVAL_4;
                        break;
                case 8:
                        nand->bch_config |= BCH_TVAL_8;
                        break;
                case 14:
                        nand->bch_config |= BCH_TVAL_14;
                        break;
                case 16:
                        nand->bch_config |= BCH_TVAL_16;
                        break;
                default:
                        dev_err(ctrl->dev, "ECC strength %d not supported\n",
                                chip->ecc.strength);
                        return -EINVAL;
                }
                break;
        default:
                dev_err(ctrl->dev, "ECC algorithm not supported\n");
                return -EINVAL;
        }

        dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
                 chip->ecc.algo == NAND_ECC_BCH ? "BCH" : "RS",
                 chip->ecc.strength);

        chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);

        switch (mtd->writesize) {
        case 256:
                nand->config |= CONFIG_PS_256;
                break;
        case 512:
                nand->config |= CONFIG_PS_512;
                break;
        case 1024:
                nand->config |= CONFIG_PS_1024;
                break;
        case 2048:
                nand->config |= CONFIG_PS_2048;
                break;
        case 4096:
                nand->config |= CONFIG_PS_4096;
                break;
        default:
                dev_err(ctrl->dev, "Unsupported writesize %d\n",
                        mtd->writesize);
                return -ENODEV;
        }

        /* Store complete configuration for HW ECC in config_ecc */
        nand->config_ecc |= nand->config;

        /* Non-HW ECC read/writes complete OOB */
        nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
        writel_relaxed(nand->config, ctrl->regs + CONFIG);

        return 0;
}

static const struct nand_controller_ops tegra_nand_controller_ops = {
        .attach_chip = &tegra_nand_attach_chip,
};

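/*
 * Parse the single supported NAND chip from the device tree: one child
 * node with a one-entry "reg" property naming the chip select, plus an
 * optional write-protect GPIO.
 */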
static int tegra_nand_chips_init(struct device *dev,
                                 struct tegra_nand_controller *ctrl)
{
        struct device_node *np = dev->of_node;
        struct device_node *np_nand;
        int nsels, nchips = of_get_child_count(np);
        struct tegra_nand_chip *nand;
        struct mtd_info *mtd;
        struct nand_chip *chip;
        int ret;
        u32 cs;

        if (nchips != 1) {
                dev_err(dev, "Currently only one NAND chip supported\n");
                return -EINVAL;
        }

        np_nand = of_get_next_child(np, NULL);

        nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
        if (nsels != 1) {
                dev_err(dev, "Missing/invalid reg property\n");
                return -EINVAL;
        }

        /* Retrieve CS id, currently only single die NAND supported */
        ret = of_property_read_u32(np_nand, "reg", &cs);
        if (ret) {
                dev_err(dev, "could not retrieve reg property: %d\n", ret);
                return ret;
        }

        nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
        if (!nand)
                return -ENOMEM;

        nand->cs[0] = cs;

        nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);

        if (IS_ERR(nand->wp_gpio)) {
                ret = PTR_ERR(nand->wp_gpio);
                dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
                return ret;
        }

        chip = &nand->chip;
        chip->controller = &ctrl->controller;

        mtd = nand_to_mtd(chip);
        mtd->dev.parent = dev;
        mtd->owner = THIS_MODULE;

        nand_set_flash_node(chip, np_nand);

        if (!mtd->name)
                mtd->name = "tegra_nand";

        chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
        chip->exec_op = tegra_nand_exec_op;
        chip->select_chip = tegra_nand_select_chip;
        chip->setup_data_interface = tegra_nand_setup_data_interface;

        ret = nand_scan(chip, 1);
        if (ret)
                return ret;

        mtd_ooblayout_ecc(mtd, 0, &nand->ecc);

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(dev, "Failed to register mtd device: %d\n", ret);
                nand_cleanup(chip);
                return ret;
        }

        ctrl->chip = chip;

        return 0;
}

static int tegra_nand_probe(struct platform_device *pdev)
{
        struct reset_control *rst;
        struct tegra_nand_controller *ctrl;
        struct resource *res;
        int err = 0;

        ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return -ENOMEM;

        ctrl->dev = &pdev->dev;
        nand_controller_init(&ctrl->controller);
        ctrl->controller.ops = &tegra_nand_controller_ops;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ctrl->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ctrl->regs))
                return PTR_ERR(ctrl->regs);

        rst = devm_reset_control_get(&pdev->dev, "nand");
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        ctrl->clk = devm_clk_get(&pdev->dev, "nand");
        if (IS_ERR(ctrl->clk))
                return PTR_ERR(ctrl->clk);

        err = clk_prepare_enable(ctrl->clk);
        if (err)
                return err;

        err = reset_control_reset(rst);
        if (err) {
                dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
                goto err_disable_clk;
        }

        writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
        writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
        writel_relaxed(INT_MASK, ctrl->regs + IER);

        init_completion(&ctrl->command_complete);
        init_completion(&ctrl->dma_complete);

        ctrl->irq = platform_get_irq(pdev, 0);
        err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
                               dev_name(&pdev->dev), ctrl);
        if (err) {
                dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
                goto err_disable_clk;
        }

        writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);

        err = tegra_nand_chips_init(ctrl->dev, ctrl);
        if (err)
                goto err_disable_clk;

        platform_set_drvdata(pdev, ctrl);

        return 0;

err_disable_clk:
        clk_disable_unprepare(ctrl->clk);
        return err;
}

static int tegra_nand_remove(struct platform_device *pdev)
{
        struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
        struct nand_chip *chip = ctrl->chip;
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        ret = mtd_device_unregister(mtd);
        if (ret)
                return ret;

        nand_cleanup(chip);

        clk_disable_unprepare(ctrl->clk);

        return 0;
}

static const struct of_device_id tegra_nand_of_match[] = {
        { .compatible = "nvidia,tegra20-nand" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);

static struct platform_driver tegra_nand_driver = {
        .driver = {
                .name = "tegra-nand",
                .of_match_table = tegra_nand_of_match,
        },
        .probe = tegra_nand_probe,
        .remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);

MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
MODULE_LICENSE("GPL v2");