sh_flctl.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242
  1. /*
  2. * SuperH FLCTL nand controller
  3. *
  4. * Copyright (c) 2008 Renesas Solutions Corp.
  5. * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
  6. *
  7. * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; version 2 of the License.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  21. *
  22. */
  23. #include <linux/module.h>
  24. #include <linux/kernel.h>
  25. #include <linux/completion.h>
  26. #include <linux/delay.h>
  27. #include <linux/dmaengine.h>
  28. #include <linux/dma-mapping.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/io.h>
  31. #include <linux/of.h>
  32. #include <linux/of_device.h>
  33. #include <linux/platform_device.h>
  34. #include <linux/pm_runtime.h>
  35. #include <linux/sh_dma.h>
  36. #include <linux/slab.h>
  37. #include <linux/string.h>
  38. #include <linux/mtd/mtd.h>
  39. #include <linux/mtd/rawnand.h>
  40. #include <linux/mtd/partitions.h>
  41. #include <linux/mtd/sh_flctl.h>
  42. static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
  43. struct mtd_oob_region *oobregion)
  44. {
  45. struct nand_chip *chip = mtd_to_nand(mtd);
  46. if (section)
  47. return -ERANGE;
  48. oobregion->offset = 0;
  49. oobregion->length = chip->ecc.bytes;
  50. return 0;
  51. }
  52. static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
  53. struct mtd_oob_region *oobregion)
  54. {
  55. if (section)
  56. return -ERANGE;
  57. oobregion->offset = 12;
  58. oobregion->length = 4;
  59. return 0;
  60. }
  61. static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
  62. .ecc = flctl_4secc_ooblayout_sp_ecc,
  63. .free = flctl_4secc_ooblayout_sp_free,
  64. };
  65. static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
  66. struct mtd_oob_region *oobregion)
  67. {
  68. struct nand_chip *chip = mtd_to_nand(mtd);
  69. if (section >= chip->ecc.steps)
  70. return -ERANGE;
  71. oobregion->offset = (section * 16) + 6;
  72. oobregion->length = chip->ecc.bytes;
  73. return 0;
  74. }
  75. static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
  76. struct mtd_oob_region *oobregion)
  77. {
  78. struct nand_chip *chip = mtd_to_nand(mtd);
  79. if (section >= chip->ecc.steps)
  80. return -ERANGE;
  81. oobregion->offset = section * 16;
  82. oobregion->length = 6;
  83. if (!section) {
  84. oobregion->offset += 2;
  85. oobregion->length -= 2;
  86. }
  87. return 0;
  88. }
  89. static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
  90. .ecc = flctl_4secc_ooblayout_lp_ecc,
  91. .free = flctl_4secc_ooblayout_lp_free,
  92. };
  93. static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
  94. static struct nand_bbt_descr flctl_4secc_smallpage = {
  95. .options = NAND_BBT_SCAN2NDPAGE,
  96. .offs = 11,
  97. .len = 1,
  98. .pattern = scan_ff_pattern,
  99. };
  100. static struct nand_bbt_descr flctl_4secc_largepage = {
  101. .options = NAND_BBT_SCAN2NDPAGE,
  102. .offs = 0,
  103. .len = 2,
  104. .pattern = scan_ff_pattern,
  105. };
  106. static void empty_fifo(struct sh_flctl *flctl)
  107. {
  108. writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
  109. writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
  110. }
  111. static void start_translation(struct sh_flctl *flctl)
  112. {
  113. writeb(TRSTRT, FLTRCR(flctl));
  114. }
  115. static void timeout_error(struct sh_flctl *flctl, const char *str)
  116. {
  117. dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
  118. }
  119. static void wait_completion(struct sh_flctl *flctl)
  120. {
  121. uint32_t timeout = LOOP_TIMEOUT_MAX;
  122. while (timeout--) {
  123. if (readb(FLTRCR(flctl)) & TREND) {
  124. writeb(0x0, FLTRCR(flctl));
  125. return;
  126. }
  127. udelay(1);
  128. }
  129. timeout_error(flctl, __func__);
  130. writeb(0x0, FLTRCR(flctl));
  131. }
  132. static void flctl_dma_complete(void *param)
  133. {
  134. struct sh_flctl *flctl = param;
  135. complete(&flctl->dma_complete);
  136. }
  137. static void flctl_release_dma(struct sh_flctl *flctl)
  138. {
  139. if (flctl->chan_fifo0_rx) {
  140. dma_release_channel(flctl->chan_fifo0_rx);
  141. flctl->chan_fifo0_rx = NULL;
  142. }
  143. if (flctl->chan_fifo0_tx) {
  144. dma_release_channel(flctl->chan_fifo0_tx);
  145. flctl->chan_fifo0_tx = NULL;
  146. }
  147. }
  148. static void flctl_setup_dma(struct sh_flctl *flctl)
  149. {
  150. dma_cap_mask_t mask;
  151. struct dma_slave_config cfg;
  152. struct platform_device *pdev = flctl->pdev;
  153. struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
  154. int ret;
  155. if (!pdata)
  156. return;
  157. if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
  158. return;
  159. /* We can only either use DMA for both Tx and Rx or not use it at all */
  160. dma_cap_zero(mask);
  161. dma_cap_set(DMA_SLAVE, mask);
  162. flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
  163. (void *)(uintptr_t)pdata->slave_id_fifo0_tx);
  164. dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
  165. flctl->chan_fifo0_tx);
  166. if (!flctl->chan_fifo0_tx)
  167. return;
  168. memset(&cfg, 0, sizeof(cfg));
  169. cfg.direction = DMA_MEM_TO_DEV;
  170. cfg.dst_addr = flctl->fifo;
  171. cfg.src_addr = 0;
  172. ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
  173. if (ret < 0)
  174. goto err;
  175. flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
  176. (void *)(uintptr_t)pdata->slave_id_fifo0_rx);
  177. dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
  178. flctl->chan_fifo0_rx);
  179. if (!flctl->chan_fifo0_rx)
  180. goto err;
  181. cfg.direction = DMA_DEV_TO_MEM;
  182. cfg.dst_addr = 0;
  183. cfg.src_addr = flctl->fifo;
  184. ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
  185. if (ret < 0)
  186. goto err;
  187. init_completion(&flctl->dma_complete);
  188. return;
  189. err:
  190. flctl_release_dma(flctl);
  191. }
  192. static void set_addr(struct mtd_info *mtd, int column, int page_addr)
  193. {
  194. struct sh_flctl *flctl = mtd_to_flctl(mtd);
  195. uint32_t addr = 0;
  196. if (column == -1) {
  197. addr = page_addr; /* ERASE1 */
  198. } else if (page_addr != -1) {
  199. /* SEQIN, READ0, etc.. */
  200. if (flctl->chip.options & NAND_BUSWIDTH_16)
  201. column >>= 1;
  202. if (flctl->page_size) {
  203. addr = column & 0x0FFF;
  204. addr |= (page_addr & 0xff) << 16;
  205. addr |= ((page_addr >> 8) & 0xff) << 24;
  206. /* big than 128MB */
  207. if (flctl->rw_ADRCNT == ADRCNT2_E) {
  208. uint32_t addr2;
  209. addr2 = (page_addr >> 16) & 0xff;
  210. writel(addr2, FLADR2(flctl));
  211. }
  212. } else {
  213. addr = column;
  214. addr |= (page_addr & 0xff) << 8;
  215. addr |= ((page_addr >> 8) & 0xff) << 16;
  216. addr |= ((page_addr >> 16) & 0xff) << 24;
  217. }
  218. }
  219. writel(addr, FLADR(flctl));
  220. }
  221. static void wait_rfifo_ready(struct sh_flctl *flctl)
  222. {
  223. uint32_t timeout = LOOP_TIMEOUT_MAX;
  224. while (timeout--) {
  225. uint32_t val;
  226. /* check FIFO */
  227. val = readl(FLDTCNTR(flctl)) >> 16;
  228. if (val & 0xFF)
  229. return;
  230. udelay(1);
  231. }
  232. timeout_error(flctl, __func__);
  233. }
  234. static void wait_wfifo_ready(struct sh_flctl *flctl)
  235. {
  236. uint32_t len, timeout = LOOP_TIMEOUT_MAX;
  237. while (timeout--) {
  238. /* check FIFO */
  239. len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
  240. if (len >= 4)
  241. return;
  242. udelay(1);
  243. }
  244. timeout_error(flctl, __func__);
  245. }
  246. static enum flctl_ecc_res_t wait_recfifo_ready
  247. (struct sh_flctl *flctl, int sector_number)
  248. {
  249. uint32_t timeout = LOOP_TIMEOUT_MAX;
  250. void __iomem *ecc_reg[4];
  251. int i;
  252. int state = FL_SUCCESS;
  253. uint32_t data, size;
  254. /*
  255. * First this loops checks in FLDTCNTR if we are ready to read out the
  256. * oob data. This is the case if either all went fine without errors or
  257. * if the bottom part of the loop corrected the errors or marked them as
  258. * uncorrectable and the controller is given time to push the data into
  259. * the FIFO.
  260. */
  261. while (timeout--) {
  262. /* check if all is ok and we can read out the OOB */
  263. size = readl(FLDTCNTR(flctl)) >> 24;
  264. if ((size & 0xFF) == 4)
  265. return state;
  266. /* check if a correction code has been calculated */
  267. if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
  268. /*
  269. * either we wait for the fifo to be filled or a
  270. * correction pattern is being generated
  271. */
  272. udelay(1);
  273. continue;
  274. }
  275. /* check for an uncorrectable error */
  276. if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
  277. /* check if we face a non-empty page */
  278. for (i = 0; i < 512; i++) {
  279. if (flctl->done_buff[i] != 0xff) {
  280. state = FL_ERROR; /* can't correct */
  281. break;
  282. }
  283. }
  284. if (state == FL_SUCCESS)
  285. dev_dbg(&flctl->pdev->dev,
  286. "reading empty sector %d, ecc error ignored\n",
  287. sector_number);
  288. writel(0, FL4ECCCR(flctl));
  289. continue;
  290. }
  291. /* start error correction */
  292. ecc_reg[0] = FL4ECCRESULT0(flctl);
  293. ecc_reg[1] = FL4ECCRESULT1(flctl);
  294. ecc_reg[2] = FL4ECCRESULT2(flctl);
  295. ecc_reg[3] = FL4ECCRESULT3(flctl);
  296. for (i = 0; i < 3; i++) {
  297. uint8_t org;
  298. unsigned int index;
  299. data = readl(ecc_reg[i]);
  300. if (flctl->page_size)
  301. index = (512 * sector_number) +
  302. (data >> 16);
  303. else
  304. index = data >> 16;
  305. org = flctl->done_buff[index];
  306. flctl->done_buff[index] = org ^ (data & 0xFF);
  307. }
  308. state = FL_REPAIRABLE;
  309. writel(0, FL4ECCCR(flctl));
  310. }
  311. timeout_error(flctl, __func__);
  312. return FL_TIMEOUT; /* timeout */
  313. }
  314. static void wait_wecfifo_ready(struct sh_flctl *flctl)
  315. {
  316. uint32_t timeout = LOOP_TIMEOUT_MAX;
  317. uint32_t len;
  318. while (timeout--) {
  319. /* check FLECFIFO */
  320. len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
  321. if (len >= 4)
  322. return;
  323. udelay(1);
  324. }
  325. timeout_error(flctl, __func__);
  326. }
  327. static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
  328. int len, enum dma_data_direction dir)
  329. {
  330. struct dma_async_tx_descriptor *desc = NULL;
  331. struct dma_chan *chan;
  332. enum dma_transfer_direction tr_dir;
  333. dma_addr_t dma_addr;
  334. dma_cookie_t cookie;
  335. uint32_t reg;
  336. int ret;
  337. if (dir == DMA_FROM_DEVICE) {
  338. chan = flctl->chan_fifo0_rx;
  339. tr_dir = DMA_DEV_TO_MEM;
  340. } else {
  341. chan = flctl->chan_fifo0_tx;
  342. tr_dir = DMA_MEM_TO_DEV;
  343. }
  344. dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
  345. if (!dma_mapping_error(chan->device->dev, dma_addr))
  346. desc = dmaengine_prep_slave_single(chan, dma_addr, len,
  347. tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  348. if (desc) {
  349. reg = readl(FLINTDMACR(flctl));
  350. reg |= DREQ0EN;
  351. writel(reg, FLINTDMACR(flctl));
  352. desc->callback = flctl_dma_complete;
  353. desc->callback_param = flctl;
  354. cookie = dmaengine_submit(desc);
  355. if (dma_submit_error(cookie)) {
  356. ret = dma_submit_error(cookie);
  357. dev_warn(&flctl->pdev->dev,
  358. "DMA submit failed, falling back to PIO\n");
  359. goto out;
  360. }
  361. dma_async_issue_pending(chan);
  362. } else {
  363. /* DMA failed, fall back to PIO */
  364. flctl_release_dma(flctl);
  365. dev_warn(&flctl->pdev->dev,
  366. "DMA failed, falling back to PIO\n");
  367. ret = -EIO;
  368. goto out;
  369. }
  370. ret =
  371. wait_for_completion_timeout(&flctl->dma_complete,
  372. msecs_to_jiffies(3000));
  373. if (ret <= 0) {
  374. dmaengine_terminate_all(chan);
  375. dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
  376. }
  377. out:
  378. reg = readl(FLINTDMACR(flctl));
  379. reg &= ~DREQ0EN;
  380. writel(reg, FLINTDMACR(flctl));
  381. dma_unmap_single(chan->device->dev, dma_addr, len, dir);
  382. /* ret > 0 is success */
  383. return ret;
  384. }
  385. static void read_datareg(struct sh_flctl *flctl, int offset)
  386. {
  387. unsigned long data;
  388. unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
  389. wait_completion(flctl);
  390. data = readl(FLDATAR(flctl));
  391. *buf = le32_to_cpu(data);
  392. }
  393. static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
  394. {
  395. int i, len_4align;
  396. unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
  397. len_4align = (rlen + 3) / 4;
  398. /* initiate DMA transfer */
  399. if (flctl->chan_fifo0_rx && rlen >= 32 &&
  400. flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
  401. goto convert; /* DMA success */
  402. /* do polling transfer */
  403. for (i = 0; i < len_4align; i++) {
  404. wait_rfifo_ready(flctl);
  405. buf[i] = readl(FLDTFIFO(flctl));
  406. }
  407. convert:
  408. for (i = 0; i < len_4align; i++)
  409. buf[i] = be32_to_cpu(buf[i]);
  410. }
  411. static enum flctl_ecc_res_t read_ecfiforeg
  412. (struct sh_flctl *flctl, uint8_t *buff, int sector)
  413. {
  414. int i;
  415. enum flctl_ecc_res_t res;
  416. unsigned long *ecc_buf = (unsigned long *)buff;
  417. res = wait_recfifo_ready(flctl , sector);
  418. if (res != FL_ERROR) {
  419. for (i = 0; i < 4; i++) {
  420. ecc_buf[i] = readl(FLECFIFO(flctl));
  421. ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
  422. }
  423. }
  424. return res;
  425. }
  426. static void write_fiforeg(struct sh_flctl *flctl, int rlen,
  427. unsigned int offset)
  428. {
  429. int i, len_4align;
  430. unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
  431. len_4align = (rlen + 3) / 4;
  432. for (i = 0; i < len_4align; i++) {
  433. wait_wfifo_ready(flctl);
  434. writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
  435. }
  436. }
  437. static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
  438. unsigned int offset)
  439. {
  440. int i, len_4align;
  441. unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
  442. len_4align = (rlen + 3) / 4;
  443. for (i = 0; i < len_4align; i++)
  444. buf[i] = cpu_to_be32(buf[i]);
  445. /* initiate DMA transfer */
  446. if (flctl->chan_fifo0_tx && rlen >= 32 &&
  447. flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
  448. return; /* DMA success */
  449. /* do polling transfer */
  450. for (i = 0; i < len_4align; i++) {
  451. wait_wecfifo_ready(flctl);
  452. writel(buf[i], FLECFIFO(flctl));
  453. }
  454. }
  455. static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
  456. {
  457. struct sh_flctl *flctl = mtd_to_flctl(mtd);
  458. uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
  459. uint32_t flcmdcr_val, addr_len_bytes = 0;
  460. /* Set SNAND bit if page size is 2048byte */
  461. if (flctl->page_size)
  462. flcmncr_val |= SNAND_E;
  463. else
  464. flcmncr_val &= ~SNAND_E;
  465. /* default FLCMDCR val */
  466. flcmdcr_val = DOCMD1_E | DOADR_E;
  467. /* Set for FLCMDCR */
  468. switch (cmd) {
  469. case NAND_CMD_ERASE1:
  470. addr_len_bytes = flctl->erase_ADRCNT;
  471. flcmdcr_val |= DOCMD2_E;
  472. break;
  473. case NAND_CMD_READ0:
  474. case NAND_CMD_READOOB:
  475. case NAND_CMD_RNDOUT:
  476. addr_len_bytes = flctl->rw_ADRCNT;
  477. flcmdcr_val |= CDSRC_E;
  478. if (flctl->chip.options & NAND_BUSWIDTH_16)
  479. flcmncr_val |= SEL_16BIT;
  480. break;
  481. case NAND_CMD_SEQIN:
  482. /* This case is that cmd is READ0 or READ1 or READ00 */
  483. flcmdcr_val &= ~DOADR_E; /* ONLY execute 1st cmd */
  484. break;
  485. case NAND_CMD_PAGEPROG:
  486. addr_len_bytes = flctl->rw_ADRCNT;
  487. flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
  488. if (flctl->chip.options & NAND_BUSWIDTH_16)
  489. flcmncr_val |= SEL_16BIT;
  490. break;
  491. case NAND_CMD_READID:
  492. flcmncr_val &= ~SNAND_E;
  493. flcmdcr_val |= CDSRC_E;
  494. addr_len_bytes = ADRCNT_1;
  495. break;
  496. case NAND_CMD_STATUS:
  497. case NAND_CMD_RESET:
  498. flcmncr_val &= ~SNAND_E;
  499. flcmdcr_val &= ~(DOADR_E | DOSR_E);
  500. break;
  501. default:
  502. break;
  503. }
  504. /* Set address bytes parameter */
  505. flcmdcr_val |= addr_len_bytes;
  506. /* Now actually write */
  507. writel(flcmncr_val, FLCMNCR(flctl));
  508. writel(flcmdcr_val, FLCMDCR(flctl));
  509. writel(flcmcdr_val, FLCMCDR(flctl));
  510. }
  511. static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
  512. int oob_required, int page)
  513. {
  514. struct mtd_info *mtd = nand_to_mtd(chip);
  515. nand_read_page_op(chip, page, 0, buf, mtd->writesize);
  516. if (oob_required)
  517. chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
  518. return 0;
  519. }
  520. static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
  521. int oob_required, int page)
  522. {
  523. struct mtd_info *mtd = nand_to_mtd(chip);
  524. nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
  525. chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
  526. return nand_prog_page_end_op(chip);
  527. }
  528. static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
  529. {
  530. struct sh_flctl *flctl = mtd_to_flctl(mtd);
  531. int sector, page_sectors;
  532. enum flctl_ecc_res_t ecc_result;
  533. page_sectors = flctl->page_size ? 4 : 1;
  534. set_cmd_regs(mtd, NAND_CMD_READ0,
  535. (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
  536. writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
  537. FLCMNCR(flctl));
  538. writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
  539. writel(page_addr << 2, FLADR(flctl));
  540. empty_fifo(flctl);
  541. start_translation(flctl);
  542. for (sector = 0; sector < page_sectors; sector++) {
  543. read_fiforeg(flctl, 512, 512 * sector);
  544. ecc_result = read_ecfiforeg(flctl,
  545. &flctl->done_buff[mtd->writesize + 16 * sector],
  546. sector);
  547. switch (ecc_result) {
  548. case FL_REPAIRABLE:
  549. dev_info(&flctl->pdev->dev,
  550. "applied ecc on page 0x%x", page_addr);
  551. mtd->ecc_stats.corrected++;
  552. break;
  553. case FL_ERROR:
  554. dev_warn(&flctl->pdev->dev,
  555. "page 0x%x contains corrupted data\n",
  556. page_addr);
  557. mtd->ecc_stats.failed++;
  558. break;
  559. default:
  560. ;
  561. }
  562. }
  563. wait_completion(flctl);
  564. writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
  565. FLCMNCR(flctl));
  566. }
  567. static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
  568. {
  569. struct sh_flctl *flctl = mtd_to_flctl(mtd);
  570. int page_sectors = flctl->page_size ? 4 : 1;
  571. int i;
  572. set_cmd_regs(mtd, NAND_CMD_READ0,
  573. (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
  574. empty_fifo(flctl);
  575. for (i = 0; i < page_sectors; i++) {
  576. set_addr(mtd, (512 + 16) * i + 512 , page_addr);
  577. writel(16, FLDTCNTR(flctl));
  578. start_translation(flctl);
  579. read_fiforeg(flctl, 16, 16 * i);
  580. wait_completion(flctl);
  581. }
  582. }
  583. static void execmd_write_page_sector(struct mtd_info *mtd)
  584. {
  585. struct sh_flctl *flctl = mtd_to_flctl(mtd);
  586. int page_addr = flctl->seqin_page_addr;
  587. int sector, page_sectors;
  588. page_sectors = flctl->page_size ? 4 : 1;
  589. set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
  590. (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
  591. empty_fifo(flctl);
  592. writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
  593. writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
  594. writel(page_addr << 2, FLADR(flctl));
  595. start_translation(flctl);
  596. for (sector = 0; sector < page_sectors; sector++) {
  597. write_fiforeg(flctl, 512, 512 * sector);
  598. write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
  599. }
  600. wait_completion(flctl);
  601. writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
  602. }
  603. static void execmd_write_oob(struct mtd_info *mtd)
  604. {
  605. struct sh_flctl *flctl = mtd_to_flctl(mtd);
  606. int page_addr = flctl->seqin_page_addr;
  607. int sector, page_sectors;
  608. page_sectors = flctl->page_size ? 4 : 1;
  609. set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
  610. (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
  611. for (sector = 0; sector < page_sectors; sector++) {
  612. empty_fifo(flctl);
  613. set_addr(mtd, sector * 528 + 512, page_addr);
  614. writel(16, FLDTCNTR(flctl)); /* set read size */
  615. start_translation(flctl);
  616. write_fiforeg(flctl, 16, 16 * sector);
  617. wait_completion(flctl);
  618. }
  619. }
  620. static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
  621. int column, int page_addr)
  622. {
  623. struct mtd_info *mtd = nand_to_mtd(chip);
  624. struct sh_flctl *flctl = mtd_to_flctl(mtd);
  625. uint32_t read_cmd = 0;
  626. pm_runtime_get_sync(&flctl->pdev->dev);
  627. flctl->read_bytes = 0;
  628. if (command != NAND_CMD_PAGEPROG)
  629. flctl->index = 0;
  630. switch (command) {
  631. case NAND_CMD_READ1:
  632. case NAND_CMD_READ0:
  633. if (flctl->hwecc) {
  634. /* read page with hwecc */
  635. execmd_read_page_sector(mtd, page_addr);
  636. break;
  637. }
  638. if (flctl->page_size)
  639. set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
  640. | command);
  641. else
  642. set_cmd_regs(mtd, command, command);
  643. set_addr(mtd, 0, page_addr);
  644. flctl->read_bytes = mtd->writesize + mtd->oobsize;
  645. if (flctl->chip.options & NAND_BUSWIDTH_16)
  646. column >>= 1;
  647. flctl->index += column;
  648. goto read_normal_exit;
  649. case NAND_CMD_READOOB:
  650. if (flctl->hwecc) {
  651. /* read page with hwecc */
  652. execmd_read_oob(mtd, page_addr);
  653. break;
  654. }
  655. if (flctl->page_size) {
  656. set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
  657. | NAND_CMD_READ0);
  658. set_addr(mtd, mtd->writesize, page_addr);
  659. } else {
  660. set_cmd_regs(mtd, command, command);
  661. set_addr(mtd, 0, page_addr);
  662. }
  663. flctl->read_bytes = mtd->oobsize;
  664. goto read_normal_exit;
  665. case NAND_CMD_RNDOUT:
  666. if (flctl->hwecc)
  667. break;
  668. if (flctl->page_size)
  669. set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
  670. | command);
  671. else
  672. set_cmd_regs(mtd, command, command);
  673. set_addr(mtd, column, 0);
  674. flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
  675. goto read_normal_exit;
  676. case NAND_CMD_READID:
  677. set_cmd_regs(mtd, command, command);
  678. /* READID is always performed using an 8-bit bus */
  679. if (flctl->chip.options & NAND_BUSWIDTH_16)
  680. column <<= 1;
  681. set_addr(mtd, column, 0);
  682. flctl->read_bytes = 8;
  683. writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
  684. empty_fifo(flctl);
  685. start_translation(flctl);
  686. read_fiforeg(flctl, flctl->read_bytes, 0);
  687. wait_completion(flctl);
  688. break;
  689. case NAND_CMD_ERASE1:
  690. flctl->erase1_page_addr = page_addr;
  691. break;
  692. case NAND_CMD_ERASE2:
  693. set_cmd_regs(mtd, NAND_CMD_ERASE1,
  694. (command << 8) | NAND_CMD_ERASE1);
  695. set_addr(mtd, -1, flctl->erase1_page_addr);
  696. start_translation(flctl);
  697. wait_completion(flctl);
  698. break;
  699. case NAND_CMD_SEQIN:
  700. if (!flctl->page_size) {
  701. /* output read command */
  702. if (column >= mtd->writesize) {
  703. column -= mtd->writesize;
  704. read_cmd = NAND_CMD_READOOB;
  705. } else if (column < 256) {
  706. read_cmd = NAND_CMD_READ0;
  707. } else {
  708. column -= 256;
  709. read_cmd = NAND_CMD_READ1;
  710. }
  711. }
  712. flctl->seqin_column = column;
  713. flctl->seqin_page_addr = page_addr;
  714. flctl->seqin_read_cmd = read_cmd;
  715. break;
  716. case NAND_CMD_PAGEPROG:
  717. empty_fifo(flctl);
  718. if (!flctl->page_size) {
  719. set_cmd_regs(mtd, NAND_CMD_SEQIN,
  720. flctl->seqin_read_cmd);
  721. set_addr(mtd, -1, -1);
  722. writel(0, FLDTCNTR(flctl)); /* set 0 size */
  723. start_translation(flctl);
  724. wait_completion(flctl);
  725. }
  726. if (flctl->hwecc) {
  727. /* write page with hwecc */
  728. if (flctl->seqin_column == mtd->writesize)
  729. execmd_write_oob(mtd);
  730. else if (!flctl->seqin_column)
  731. execmd_write_page_sector(mtd);
  732. else
  733. pr_err("Invalid address !?\n");
  734. break;
  735. }
  736. set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
  737. set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
  738. writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
  739. start_translation(flctl);
  740. write_fiforeg(flctl, flctl->index, 0);
  741. wait_completion(flctl);
  742. break;
  743. case NAND_CMD_STATUS:
  744. set_cmd_regs(mtd, command, command);
  745. set_addr(mtd, -1, -1);
  746. flctl->read_bytes = 1;
  747. writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
  748. start_translation(flctl);
  749. read_datareg(flctl, 0); /* read and end */
  750. break;
  751. case NAND_CMD_RESET:
  752. set_cmd_regs(mtd, command, command);
  753. set_addr(mtd, -1, -1);
  754. writel(0, FLDTCNTR(flctl)); /* set 0 size */
  755. start_translation(flctl);
  756. wait_completion(flctl);
  757. break;
  758. default:
  759. break;
  760. }
  761. goto runtime_exit;
  762. read_normal_exit:
  763. writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
  764. empty_fifo(flctl);
  765. start_translation(flctl);
  766. read_fiforeg(flctl, flctl->read_bytes, 0);
  767. wait_completion(flctl);
  768. runtime_exit:
  769. pm_runtime_put_sync(&flctl->pdev->dev);
  770. return;
  771. }
  772. static void flctl_select_chip(struct nand_chip *chip, int chipnr)
  773. {
  774. struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
  775. int ret;
  776. switch (chipnr) {
  777. case -1:
  778. flctl->flcmncr_base &= ~CE0_ENABLE;
  779. pm_runtime_get_sync(&flctl->pdev->dev);
  780. writel(flctl->flcmncr_base, FLCMNCR(flctl));
  781. if (flctl->qos_request) {
  782. dev_pm_qos_remove_request(&flctl->pm_qos);
  783. flctl->qos_request = 0;
  784. }
  785. pm_runtime_put_sync(&flctl->pdev->dev);
  786. break;
  787. case 0:
  788. flctl->flcmncr_base |= CE0_ENABLE;
  789. if (!flctl->qos_request) {
  790. ret = dev_pm_qos_add_request(&flctl->pdev->dev,
  791. &flctl->pm_qos,
  792. DEV_PM_QOS_RESUME_LATENCY,
  793. 100);
  794. if (ret < 0)
  795. dev_err(&flctl->pdev->dev,
  796. "PM QoS request failed: %d\n", ret);
  797. flctl->qos_request = 1;
  798. }
  799. if (flctl->holden) {
  800. pm_runtime_get_sync(&flctl->pdev->dev);
  801. writel(HOLDEN, FLHOLDCR(flctl));
  802. pm_runtime_put_sync(&flctl->pdev->dev);
  803. }
  804. break;
  805. default:
  806. BUG();
  807. }
  808. }
  809. static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
  810. {
  811. struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
  812. memcpy(&flctl->done_buff[flctl->index], buf, len);
  813. flctl->index += len;
  814. }
  815. static uint8_t flctl_read_byte(struct nand_chip *chip)
  816. {
  817. struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
  818. uint8_t data;
  819. data = flctl->done_buff[flctl->index];
  820. flctl->index++;
  821. return data;
  822. }
  823. static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
  824. {
  825. struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
  826. memcpy(buf, &flctl->done_buff[flctl->index], len);
  827. flctl->index += len;
  828. }
  829. static int flctl_chip_attach_chip(struct nand_chip *chip)
  830. {
  831. struct mtd_info *mtd = nand_to_mtd(chip);
  832. struct sh_flctl *flctl = mtd_to_flctl(mtd);
  833. /*
  834. * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
  835. * Add the SEL_16BIT flag in flctl->flcmncr_base.
  836. */
  837. if (chip->options & NAND_BUSWIDTH_16)
  838. flctl->flcmncr_base |= SEL_16BIT;
  839. if (mtd->writesize == 512) {
  840. flctl->page_size = 0;
  841. if (chip->chipsize > (32 << 20)) {
  842. /* big than 32MB */
  843. flctl->rw_ADRCNT = ADRCNT_4;
  844. flctl->erase_ADRCNT = ADRCNT_3;
  845. } else if (chip->chipsize > (2 << 16)) {
  846. /* big than 128KB */
  847. flctl->rw_ADRCNT = ADRCNT_3;
  848. flctl->erase_ADRCNT = ADRCNT_2;
  849. } else {
  850. flctl->rw_ADRCNT = ADRCNT_2;
  851. flctl->erase_ADRCNT = ADRCNT_1;
  852. }
  853. } else {
  854. flctl->page_size = 1;
  855. if (chip->chipsize > (128 << 20)) {
  856. /* big than 128MB */
  857. flctl->rw_ADRCNT = ADRCNT2_E;
  858. flctl->erase_ADRCNT = ADRCNT_3;
  859. } else if (chip->chipsize > (8 << 16)) {
  860. /* big than 512KB */
  861. flctl->rw_ADRCNT = ADRCNT_4;
  862. flctl->erase_ADRCNT = ADRCNT_2;
  863. } else {
  864. flctl->rw_ADRCNT = ADRCNT_3;
  865. flctl->erase_ADRCNT = ADRCNT_1;
  866. }
  867. }
  868. if (flctl->hwecc) {
  869. if (mtd->writesize == 512) {
  870. mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
  871. chip->badblock_pattern = &flctl_4secc_smallpage;
  872. } else {
  873. mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
  874. chip->badblock_pattern = &flctl_4secc_largepage;
  875. }
  876. chip->ecc.size = 512;
  877. chip->ecc.bytes = 10;
  878. chip->ecc.strength = 4;
  879. chip->ecc.read_page = flctl_read_page_hwecc;
  880. chip->ecc.write_page = flctl_write_page_hwecc;
  881. chip->ecc.mode = NAND_ECC_HW;
  882. /* 4 symbols ECC enabled */
  883. flctl->flcmncr_base |= _4ECCEN;
  884. } else {
  885. chip->ecc.mode = NAND_ECC_SOFT;
  886. chip->ecc.algo = NAND_ECC_HAMMING;
  887. }
  888. return 0;
  889. }
/* Controller hooks for the raw NAND core; only attach_chip is needed. */
static const struct nand_controller_ops flctl_nand_controller_ops = {
	.attach_chip = flctl_chip_attach_chip,
};
  893. static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
  894. {
  895. struct sh_flctl *flctl = dev_id;
  896. dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
  897. writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
  898. return IRQ_HANDLED;
  899. }
/* Per-SoC configuration attached to an OF match entry. */
struct flctl_soc_config {
	unsigned long flcmncr_val;	/* base FLCMNCR register value */
	unsigned has_hwecc:1;		/* 4-symbol hardware ECC available */
	unsigned use_holden:1;		/* drive the HOLDEN hold signal */
};
/* SH7372: hardware ECC and hold signaling are both used. */
static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};
/* OF match table; .data carries the per-SoC configuration above. */
static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);
  916. static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
  917. {
  918. const struct flctl_soc_config *config;
  919. struct sh_flctl_platform_data *pdata;
  920. config = of_device_get_match_data(dev);
  921. if (!config) {
  922. dev_err(dev, "%s: no OF configuration attached\n", __func__);
  923. return NULL;
  924. }
  925. pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
  926. GFP_KERNEL);
  927. if (!pdata)
  928. return NULL;
  929. /* set SoC specific options */
  930. pdata->flcmncr_val = config->flcmncr_val;
  931. pdata->has_hwecc = config->has_hwecc;
  932. pdata->use_holden = config->use_holden;
  933. return pdata;
  934. }
/*
 * Probe: map the FLCTL register window, install the FLSTE error IRQ
 * handler, fetch SoC configuration (DT or board platform data), set up
 * the legacy raw-NAND hooks, and register the single attached chip via
 * nand_scan() / mtd_device_register().
 *
 * Returns 0 on success or a negative errno.
 */
static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	/* data FIFO sits at a fixed 0x24 offset from the register base */
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	/* DT configuration takes precedence over board platform data */
	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	/* ECC error interrupt only makes sense with hardware ECC */
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->legacy.chip_delay = 20;

	nand->legacy.read_byte = flctl_read_byte;
	nand->legacy.write_buf = flctl_write_buf;
	nand->legacy.read_buf = flctl_read_buf;
	nand->select_chip = flctl_select_chip;
	nand->legacy.cmdfunc = flctl_cmdfunc;
	nand->legacy.set_features = nand_get_set_features_notsupp;
	nand->legacy.get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	nand->dummy_controller.ops = &flctl_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	/* undo DMA setup and runtime PM enable in reverse order */
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
/*
 * Remove: tear down in reverse probe order — release DMA channels,
 * unregister and clean up the NAND/MTD device, then disable runtime PM.
 */
static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	flctl_release_dma(flctl);
	nand_release(&flctl->chip);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
/*
 * No .probe member here: module_platform_driver_probe() below binds
 * flctl_probe() as a one-shot probe at registration time.
 */
static struct platform_driver flctl_driver = {
	.remove = flctl_remove,
	.driver = {
		.name = "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};
/* Register the driver; probe runs once when the module initializes. */
module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");