gpmi-lib.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Freescale GPMI NAND Flash Driver
  4. *
  5. * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
  6. * Copyright (C) 2008 Embedded Alley Solutions, Inc.
  7. */
  8. #include <linux/delay.h>
  9. #include <linux/clk.h>
  10. #include <linux/slab.h>
  11. #include "gpmi-nand.h"
  12. #include "gpmi-regs.h"
  13. #include "bch-regs.h"
  14. /* Converts time to clock cycles */
  15. #define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
  16. #define MXS_SET_ADDR 0x4
  17. #define MXS_CLR_ADDR 0x8
  18. /*
  19. * Clear the bit and poll it cleared. This is usually called with
  20. * a reset address and mask being either SFTRST(bit 31) or CLKGATE
  21. * (bit 30).
  22. */
  23. static int clear_poll_bit(void __iomem *addr, u32 mask)
  24. {
  25. int timeout = 0x400;
  26. /* clear the bit */
  27. writel(mask, addr + MXS_CLR_ADDR);
  28. /*
  29. * SFTRST needs 3 GPMI clocks to settle, the reference manual
  30. * recommends to wait 1us.
  31. */
  32. udelay(1);
  33. /* poll the bit becoming clear */
  34. while ((readl(addr) & mask) && --timeout)
  35. /* nothing */;
  36. return !timeout;
  37. }
  38. #define MODULE_CLKGATE (1 << 30)
  39. #define MODULE_SFTRST (1 << 31)
  40. /*
  41. * The current mxs_reset_block() will do two things:
  42. * [1] enable the module.
  43. * [2] reset the module.
  44. *
  45. * In most of the cases, it's ok.
  46. * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
  47. * If you try to soft reset the BCH block, it becomes unusable until
  48. * the next hard reset. This case occurs in the NAND boot mode. When the board
  49. * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
  50. * So If the driver tries to reset the BCH again, the BCH will not work anymore.
  51. * You will see a DMA timeout in this case. The bug has been fixed
  52. * in the following chips, such as MX28.
  53. *
  54. * To avoid this bug, just add a new parameter `just_enable` for
  55. * the mxs_reset_block(), and rewrite it here.
  56. */
/*
 * Bring a GPMI/BCH hardware block out of reset and ungate its clock.
 *
 * @reset_addr:  base of the block's CTRL register; SFTRST and CLKGATE
 *               live in bits 31 and 30 of that register.
 * @just_enable: when true, only enable the module without soft-resetting
 *               it.  This is required for the BCH block on MX23, where a
 *               soft reset wedges the block until the next hard reset
 *               (erratum #2847, see the comment above).
 *
 * Returns 0 on success, -ETIMEDOUT if any poll loop expires.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/*
		 * Poll CLKGATE becoming set: per the documented reset
		 * sequence, CLKGATE asserting confirms the soft reset
		 * has taken effect.
		 */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			/* nothing */;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
  90. static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
  91. {
  92. struct clk *clk;
  93. int ret;
  94. int i;
  95. for (i = 0; i < GPMI_CLK_MAX; i++) {
  96. clk = this->resources.clock[i];
  97. if (!clk)
  98. break;
  99. if (v) {
  100. ret = clk_prepare_enable(clk);
  101. if (ret)
  102. goto err_clk;
  103. } else {
  104. clk_disable_unprepare(clk);
  105. }
  106. }
  107. return 0;
  108. err_clk:
  109. for (; i > 0; i--)
  110. clk_disable_unprepare(this->resources.clock[i - 1]);
  111. return ret;
  112. }
/* Prepare and enable all GPMI-related clocks; returns 0 or an errno. */
int gpmi_enable_clk(struct gpmi_nand_data *this)
{
	return __gpmi_enable_clk(this, true);
}
/* Disable and unprepare all GPMI-related clocks; always returns 0. */
int gpmi_disable_clk(struct gpmi_nand_data *this)
{
	return __gpmi_enable_clk(this, false);
}
/*
 * One-time initialization of the GPMI controller: reset the GPMI and
 * BCH blocks, then program the base CTRL1 configuration (NAND mode,
 * IRQ polarity, write protection off, BCH ECC, decoupled chip select).
 *
 * Returns 0 on success or a negative error code.  The clocks are
 * enabled only for the duration of this function.
 */
int gpmi_init(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	int ret;

	ret = gpmi_enable_clk(this);
	if (ret)
		return ret;
	ret = gpmi_reset_block(r->gpmi_regs, false);
	if (ret)
		goto err_out;

	/*
	 * Reset BCH here, too. We got failures otherwise :(
	 * See later BCH reset for explanation of MX23 handling
	 * (on MX23 the BCH is only enabled, never soft-reset).
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
	if (ret)
		goto err_out;

	/* Choose NAND mode. */
	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Set the IRQ polarity. */
	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
	       r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Disable Write-Protection. */
	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Select BCH ECC. */
	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * Decouple the chip select from dma channel. We use dma0 for all
	 * the chips.
	 */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	gpmi_disable_clk(this);
	return 0;
err_out:
	gpmi_disable_clk(this);
	return ret;
}
/* Debug helper: only called when a bug occurs, to dump controller state. */
  159. void gpmi_dump_info(struct gpmi_nand_data *this)
  160. {
  161. struct resources *r = &this->resources;
  162. struct bch_geometry *geo = &this->bch_geometry;
  163. u32 reg;
  164. int i;
  165. dev_err(this->dev, "Show GPMI registers :\n");
  166. for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
  167. reg = readl(r->gpmi_regs + i * 0x10);
  168. dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
  169. }
  170. /* start to print out the BCH info */
  171. dev_err(this->dev, "Show BCH registers :\n");
  172. for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
  173. reg = readl(r->bch_regs + i * 0x10);
  174. dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
  175. }
  176. dev_err(this->dev, "BCH Geometry :\n"
  177. "GF length : %u\n"
  178. "ECC Strength : %u\n"
  179. "Page Size in Bytes : %u\n"
  180. "Metadata Size in Bytes : %u\n"
  181. "ECC Chunk Size in Bytes: %u\n"
  182. "ECC Chunk Count : %u\n"
  183. "Payload Size in Bytes : %u\n"
  184. "Auxiliary Size in Bytes: %u\n"
  185. "Auxiliary Status Offset: %u\n"
  186. "Block Mark Byte Offset : %u\n"
  187. "Block Mark Bit Offset : %u\n",
  188. geo->gf_len,
  189. geo->ecc_strength,
  190. geo->page_size,
  191. geo->metadata_size,
  192. geo->ecc_chunk_size,
  193. geo->ecc_chunk_count,
  194. geo->payload_size,
  195. geo->auxiliary_size,
  196. geo->auxiliary_status_offset,
  197. geo->block_mark_byte_offset,
  198. geo->block_mark_bit_offset);
  199. }
  200. /* Configures the geometry for BCH. */
/*
 * Configures the geometry for BCH: derive the ECC layout via
 * common_nfc_set_geometry(), program it into BCH FLASH0LAYOUT0/1,
 * point all chip selects at layout 0 and enable the completion IRQ.
 *
 * Returns 0 on success or a negative error code.  Clocks are enabled
 * only for the duration of the register programming.
 */
int bch_set_geometry(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	unsigned int block_count;
	unsigned int block_size;
	unsigned int metadata_size;
	unsigned int ecc_strength;
	unsigned int page_size;
	unsigned int gf_len;
	int ret;

	ret = common_nfc_set_geometry(this);
	if (ret)
		return ret;

	/* The NBLOCKS register field holds "chunk count minus one". */
	block_count = bch_geo->ecc_chunk_count - 1;
	block_size = bch_geo->ecc_chunk_size;
	metadata_size = bch_geo->metadata_size;
	/* The ECC0/ECCN fields are written as strength / 2. */
	ecc_strength = bch_geo->ecc_strength >> 1;
	page_size = bch_geo->page_size;
	gf_len = bch_geo->gf_len;

	ret = gpmi_enable_clk(this);
	if (ret)
		return ret;

	/*
	 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
	 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
	 * On the other hand, the MX28 needs the reset, because one case has been
	 * seen where the BCH produced ECC errors constantly after 10000
	 * consecutive reboots. The latter case has not been seen on the MX23
	 * yet, still we don't know if it could happen there as well.
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
	if (ret)
		goto err_out;

	/* Configure layout 0. */
	writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
	       | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
	       | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
	       | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
	       | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
	       r->bch_regs + HW_BCH_FLASH0LAYOUT0);

	writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
	       | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
	       | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
	       | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
	       r->bch_regs + HW_BCH_FLASH0LAYOUT1);

	/* Set *all* chip selects to use layout 0. */
	writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);

	/* Enable interrupts. */
	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
	       r->bch_regs + HW_BCH_CTRL_SET);

	gpmi_disable_clk(this);
	return 0;
err_out:
	gpmi_disable_clk(this);
	return ret;
}
  258. /*
  259. * <1> Firstly, we should know what's the GPMI-clock means.
  260. * The GPMI-clock is the internal clock in the gpmi nand controller.
  261. * If you set 100MHz to gpmi nand controller, the GPMI-clock's period
  262. * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
  263. *
  264. * <2> Secondly, we should know what's the frequency on the nand chip pins.
  265. * The frequency on the nand chip pins is derived from the GPMI-clock.
  266. * We can get it from the following equation:
  267. *
  268. * F = G / (DS + DH)
  269. *
  270. * F : the frequency on the nand chip pins.
  271. * G : the GPMI clock, such as 100MHz.
  272. * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
  273. * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
  274. *
  275. * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
  276. * the nand EDO(extended Data Out) timing could be applied.
  277. * The GPMI implements a feedback read strobe to sample the read data.
  278. * The feedback read strobe can be delayed to support the nand EDO timing
* where the read strobe may deassert before the read data is valid, and
  280. * read data is valid for some time after read strobe.
  281. *
  282. * The following figure illustrates some aspects of a NAND Flash read:
  283. *
  284. * |<---tREA---->|
  285. * | |
  286. * | | |
  287. * |<--tRP-->| |
  288. * | | |
  289. * __ ___|__________________________________
  290. * RDN \________/ |
  291. * |
  292. * /---------\
  293. * Read Data --------------< >---------
  294. * \---------/
  295. * | |
  296. * |<-D->|
  297. * FeedbackRDN ________ ____________
  298. * \___________/
  299. *
  300. * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
  301. *
  302. *
  303. * <4> Now, we begin to describe how to compute the right RDN_DELAY.
  304. *
  305. * 4.1) From the aspect of the nand chip pins:
  306. * Delay = (tREA + C - tRP) {1}
  307. *
  308. * tREA : the maximum read access time.
  309. * C : a constant to adjust the delay. default is 4000ps.
  310. * tRP : the read pulse width, which is exactly:
  311. * tRP = (GPMI-clock-period) * DATA_SETUP
  312. *
  313. * 4.2) From the aspect of the GPMI nand controller:
  314. * Delay = RDN_DELAY * 0.125 * RP {2}
  315. *
  316. * RP : the DLL reference period.
* if (GPMI-clock-period > DLL_THRESHOLD)
* RP = GPMI-clock-period / 2;
* else
* RP = GPMI-clock-period;
*
* Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
* is greater than DLL_THRESHOLD. In other SOCs, the DLL_THRESHOLD
* is 16000ps, but in mx6q, we use 12000ps.
  325. *
  326. * 4.3) since {1} equals {2}, we get:
  327. *
  328. * (tREA + 4000 - tRP) * 8
  329. * RDN_DELAY = ----------------------- {3}
  330. * RP
  331. */
  332. static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
  333. const struct nand_sdr_timings *sdr)
  334. {
  335. struct gpmi_nfc_hardware_timing *hw = &this->hw;
  336. unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
  337. unsigned int period_ps, reference_period_ps;
  338. unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
  339. unsigned int tRP_ps;
  340. bool use_half_period;
  341. int sample_delay_ps, sample_delay_factor;
  342. u16 busy_timeout_cycles;
  343. u8 wrn_dly_sel;
  344. if (sdr->tRC_min >= 30000) {
  345. /* ONFI non-EDO modes [0-3] */
  346. hw->clk_rate = 22000000;
  347. wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
  348. } else if (sdr->tRC_min >= 25000) {
  349. /* ONFI EDO mode 4 */
  350. hw->clk_rate = 80000000;
  351. wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
  352. } else {
  353. /* ONFI EDO mode 5 */
  354. hw->clk_rate = 100000000;
  355. wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
  356. }
  357. /* SDR core timings are given in picoseconds */
  358. period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
  359. addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
  360. data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
  361. data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
  362. busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
  363. hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
  364. BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
  365. BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
  366. hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
  367. /*
  368. * Derive NFC ideal delay from {3}:
  369. *
  370. * (tREA + 4000 - tRP) * 8
  371. * RDN_DELAY = -----------------------
  372. * RP
  373. */
  374. if (period_ps > dll_threshold_ps) {
  375. use_half_period = true;
  376. reference_period_ps = period_ps / 2;
  377. } else {
  378. use_half_period = false;
  379. reference_period_ps = period_ps;
  380. }
  381. tRP_ps = data_setup_cycles * period_ps;
  382. sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
  383. if (sample_delay_ps > 0)
  384. sample_delay_factor = sample_delay_ps / reference_period_ps;
  385. else
  386. sample_delay_factor = 0;
  387. hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
  388. if (sample_delay_factor)
  389. hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
  390. BM_GPMI_CTRL1_DLL_ENABLE |
  391. (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
  392. }
/*
 * Program the timings cached in this->hw (by gpmi_nfc_compute_timings())
 * into the hardware: set the GPMI clock rate, write TIMING0/TIMING1,
 * and rewrite the CTRL1 delay/DLL fields.
 */
void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int dll_wait_time_us;

	/*
	 * NOTE(review): the clk_set_rate() return value is ignored here —
	 * confirm the requested rate can never fail to apply.
	 */
	clk_set_rate(r->clock[0], hw->clk_rate);

	writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
	writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);

	/*
	 * Clear several CTRL1 fields, DLL must be disabled when setting
	 * RDN_DELAY or HALF_PERIOD.
	 */
	writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Wait 64 clock cycles before using the GPMI after enabling the DLL */
	dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
	if (!dll_wait_time_us)
		dll_wait_time_us = 1;	/* integer division rounded down to 0 */

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_us);
}
  415. int gpmi_setup_data_interface(struct mtd_info *mtd, int chipnr,
  416. const struct nand_data_interface *conf)
  417. {
  418. struct nand_chip *chip = mtd_to_nand(mtd);
  419. struct gpmi_nand_data *this = nand_get_controller_data(chip);
  420. const struct nand_sdr_timings *sdr;
  421. /* Retrieve required NAND timings */
  422. sdr = nand_get_sdr_timings(conf);
  423. if (IS_ERR(sdr))
  424. return PTR_ERR(sdr);
  425. /* Only MX6 GPMI controller can reach EDO timings */
  426. if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
  427. return -ENOTSUPP;
  428. /* Stop here if this call was just a check */
  429. if (chipnr < 0)
  430. return 0;
  431. /* Do the actual derivation of the controller timings */
  432. gpmi_nfc_compute_timings(this, sdr);
  433. this->hw.must_apply_timings = true;
  434. return 0;
  435. }
/* Clears a BCH interrupt: acknowledge the COMPLETE_IRQ status bit. */
void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;

	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
  442. /* Returns the Ready/Busy status of the given chip. */
  443. int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
  444. {
  445. struct resources *r = &this->resources;
  446. uint32_t mask = 0;
  447. uint32_t reg = 0;
  448. if (GPMI_IS_MX23(this)) {
  449. mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
  450. reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
  451. } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
  452. /*
  453. * In the imx6, all the ready/busy pins are bound
  454. * together. So we only need to check chip 0.
  455. */
  456. if (GPMI_IS_MX6(this))
  457. chip = 0;
  458. /* MX28 shares the same R/B register as MX6Q. */
  459. mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
  460. reg = readl(r->gpmi_regs + HW_GPMI_STAT);
  461. } else
  462. dev_err(this->dev, "unknown arch.\n");
  463. return reg & mask;
  464. }
  465. int gpmi_send_command(struct gpmi_nand_data *this)
  466. {
  467. struct dma_chan *channel = get_dma_chan(this);
  468. struct dma_async_tx_descriptor *desc;
  469. struct scatterlist *sgl;
  470. int chip = this->current_chip;
  471. int ret;
  472. u32 pio[3];
  473. /* [1] send out the PIO words */
  474. pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
  475. | BM_GPMI_CTRL0_WORD_LENGTH
  476. | BF_GPMI_CTRL0_CS(chip, this)
  477. | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
  478. | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
  479. | BM_GPMI_CTRL0_ADDRESS_INCREMENT
  480. | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
  481. pio[1] = pio[2] = 0;
  482. desc = dmaengine_prep_slave_sg(channel,
  483. (struct scatterlist *)pio,
  484. ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
  485. if (!desc)
  486. return -EINVAL;
  487. /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
  488. sgl = &this->cmd_sgl;
  489. sg_init_one(sgl, this->cmd_buffer, this->command_length);
  490. dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
  491. desc = dmaengine_prep_slave_sg(channel,
  492. sgl, 1, DMA_MEM_TO_DEV,
  493. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  494. if (!desc)
  495. return -EINVAL;
  496. /* [3] submit the DMA */
  497. ret = start_dma_without_bch_irq(this, desc);
  498. dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
  499. return ret;
  500. }
  501. int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len)
  502. {
  503. struct dma_async_tx_descriptor *desc;
  504. struct dma_chan *channel = get_dma_chan(this);
  505. int chip = this->current_chip;
  506. int ret;
  507. uint32_t command_mode;
  508. uint32_t address;
  509. u32 pio[2];
  510. /* [1] PIO */
  511. command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
  512. address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
  513. pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
  514. | BM_GPMI_CTRL0_WORD_LENGTH
  515. | BF_GPMI_CTRL0_CS(chip, this)
  516. | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
  517. | BF_GPMI_CTRL0_ADDRESS(address)
  518. | BF_GPMI_CTRL0_XFER_COUNT(len);
  519. pio[1] = 0;
  520. desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
  521. ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
  522. if (!desc)
  523. return -EINVAL;
  524. /* [2] send DMA request */
  525. prepare_data_dma(this, buf, len, DMA_TO_DEVICE);
  526. desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
  527. 1, DMA_MEM_TO_DEV,
  528. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  529. if (!desc)
  530. return -EINVAL;
  531. /* [3] submit the DMA */
  532. ret = start_dma_without_bch_irq(this, desc);
  533. dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
  534. return ret;
  535. }
  536. int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len)
  537. {
  538. struct dma_async_tx_descriptor *desc;
  539. struct dma_chan *channel = get_dma_chan(this);
  540. int chip = this->current_chip;
  541. int ret;
  542. u32 pio[2];
  543. bool direct;
  544. /* [1] : send PIO */
  545. pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
  546. | BM_GPMI_CTRL0_WORD_LENGTH
  547. | BF_GPMI_CTRL0_CS(chip, this)
  548. | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
  549. | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
  550. | BF_GPMI_CTRL0_XFER_COUNT(len);
  551. pio[1] = 0;
  552. desc = dmaengine_prep_slave_sg(channel,
  553. (struct scatterlist *)pio,
  554. ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
  555. if (!desc)
  556. return -EINVAL;
  557. /* [2] : send DMA request */
  558. direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE);
  559. desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
  560. 1, DMA_DEV_TO_MEM,
  561. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  562. if (!desc)
  563. return -EINVAL;
  564. /* [3] : submit the DMA */
  565. ret = start_dma_without_bch_irq(this, desc);
  566. dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
  567. if (!direct)
  568. memcpy(buf, this->data_buffer_dma, len);
  569. return ret;
  570. }
  571. int gpmi_send_page(struct gpmi_nand_data *this,
  572. dma_addr_t payload, dma_addr_t auxiliary)
  573. {
  574. struct bch_geometry *geo = &this->bch_geometry;
  575. uint32_t command_mode;
  576. uint32_t address;
  577. uint32_t ecc_command;
  578. uint32_t buffer_mask;
  579. struct dma_async_tx_descriptor *desc;
  580. struct dma_chan *channel = get_dma_chan(this);
  581. int chip = this->current_chip;
  582. u32 pio[6];
  583. /* A DMA descriptor that does an ECC page read. */
  584. command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
  585. address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
  586. ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
  587. buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
  588. BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
  589. pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
  590. | BM_GPMI_CTRL0_WORD_LENGTH
  591. | BF_GPMI_CTRL0_CS(chip, this)
  592. | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
  593. | BF_GPMI_CTRL0_ADDRESS(address)
  594. | BF_GPMI_CTRL0_XFER_COUNT(0);
  595. pio[1] = 0;
  596. pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
  597. | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
  598. | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
  599. pio[3] = geo->page_size;
  600. pio[4] = payload;
  601. pio[5] = auxiliary;
  602. desc = dmaengine_prep_slave_sg(channel,
  603. (struct scatterlist *)pio,
  604. ARRAY_SIZE(pio), DMA_TRANS_NONE,
  605. DMA_CTRL_ACK);
  606. if (!desc)
  607. return -EINVAL;
  608. return start_dma_with_bch_irq(this, desc);
  609. }
/*
 * Read one BCH-protected page by chaining three DMA descriptors:
 *   [1] wait for the chip to report ready,
 *   [2] read the page with the BCH decoder enabled,
 *   [3] a final wait-for-ready whose ECCCTRL word is zero, which
 *       disables the BCH block again,
 * then submit the chain and wait for the BCH completion interrupt.
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_read_page(struct gpmi_nand_data *this,
		   dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* [1] Wait for the chip to report ready. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	/* Only the first 2 PIO words are needed for this step. */
	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio, 2,
				       DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] Enable the BCH block and read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
		| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	/* The BCH engine is handed the destination buffers directly. */
	pio[4] = payload;
	pio[5] = auxiliary;
	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio,
				       ARRAY_SIZE(pio), DMA_TRANS_NONE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] Disable the BCH block */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
	pio[1] = 0;
	pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
	/* 3 PIO words: CTRL0, COMPARE, and the zeroed ECCCTRL. */
	desc = dmaengine_prep_slave_sg(channel,
				       (struct scatterlist *)pio, 3,
				       DMA_TRANS_NONE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [4] submit the DMA */
	return start_dma_with_bch_irq(this, desc);
}
  682. /**
  683. * gpmi_copy_bits - copy bits from one memory region to another
  684. * @dst: destination buffer
  685. * @dst_bit_off: bit offset we're starting to write at
  686. * @src: source buffer
  687. * @src_bit_off: bit offset we're starting to read from
  688. * @nbits: number of bits to copy
  689. *
  690. * This functions copies bits from one memory region to another, and is used by
  691. * the GPMI driver to copy ECC sections which are not guaranteed to be byte
  692. * aligned.
  693. *
  694. * src and dst should not overlap.
  695. *
  696. */
/*
 * Copy @nbits bits from @src (starting at bit @src_bit_off) to @dst
 * (starting at bit @dst_bit_off).  Bit offsets are LSB-first within
 * each byte.  src and dst must not overlap.  Used to copy ECC sections
 * that are not byte aligned.
 */
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
		    const u8 *src, size_t src_bit_off,
		    size_t nbits)
{
	size_t i;
	size_t nbytes;
	u32 src_buffer = 0;		/* staging buffer, filled LSB-first */
	size_t bits_in_src_buffer = 0;	/* number of valid bits in src_buffer */

	if (!nbits)
		return;

	/*
	 * Move src and dst pointers to the closest byte pointer and store bit
	 * offsets within a byte.
	 */
	src += src_bit_off / 8;
	src_bit_off %= 8;

	dst += dst_bit_off / 8;
	dst_bit_off %= 8;

	/*
	 * Initialize the src_buffer value with bits available in the first
	 * byte of data so that we end up with a byte aligned src pointer.
	 */
	if (src_bit_off) {
		src_buffer = src[0] >> src_bit_off;
		if (nbits >= (8 - src_bit_off)) {
			bits_in_src_buffer += 8 - src_bit_off;
		} else {
			/* Fewer bits requested than remain in this byte. */
			src_buffer &= GENMASK(nbits - 1, 0);
			bits_in_src_buffer += nbits;
		}
		nbits -= bits_in_src_buffer;
		src++;
	}

	/* Calculate the number of bytes that can be copied from src to dst. */
	nbytes = nbits / 8;

	/* Try to align dst to a byte boundary. */
	if (dst_bit_off) {
		/* Pull in one more source byte if the buffer is too short. */
		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
			src_buffer |= src[0] << bits_in_src_buffer;
			bits_in_src_buffer += 8;
			src++;
			nbytes--;
		}

		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
			/*
			 * Keep the low dst_bit_off bits already present in
			 * dst[0] and splice the buffered bits above them.
			 */
			dst[0] &= GENMASK(dst_bit_off - 1, 0);
			dst[0] |= src_buffer << dst_bit_off;
			src_buffer >>= (8 - dst_bit_off);
			bits_in_src_buffer -= (8 - dst_bit_off);
			dst_bit_off = 0;
			dst++;
			/* Flush a full byte from the buffer if one remains. */
			if (bits_in_src_buffer > 7) {
				bits_in_src_buffer -= 8;
				dst[0] = src_buffer;
				dst++;
				src_buffer >>= 8;
			}
		}
	}

	if (!bits_in_src_buffer && !dst_bit_off) {
		/*
		 * Both src and dst pointers are byte aligned, thus we can
		 * just use the optimized memcpy function.
		 */
		if (nbytes)
			memcpy(dst, src, nbytes);
	} else {
		/*
		 * src buffer is not byte aligned, hence we have to copy each
		 * src byte to the src_buffer variable before extracting a byte
		 * to store in dst.
		 */
		for (i = 0; i < nbytes; i++) {
			src_buffer |= src[i] << bits_in_src_buffer;
			dst[i] = src_buffer;
			src_buffer >>= 8;
		}
	}

	/* Update dst and src pointers */
	dst += nbytes;
	src += nbytes;

	/*
	 * nbits is the number of remaining bits. It should not exceed 8 as
	 * we've already copied as much bytes as possible.
	 */
	nbits %= 8;

	/*
	 * If there's no more bits to copy to the destination and src buffer
	 * was already byte aligned, then we're done.
	 */
	if (!nbits && !bits_in_src_buffer)
		return;

	/* Copy the remaining bits to src_buffer */
	if (nbits)
		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
			      bits_in_src_buffer;
	bits_in_src_buffer += nbits;

	/*
	 * In case there were not enough bits to get a byte aligned dst buffer
	 * prepare the src_buffer variable to match the dst organization (shift
	 * src_buffer by dst_bit_off and retrieve the least significant bits
	 * from dst).
	 */
	if (dst_bit_off)
		src_buffer = (src_buffer << dst_bit_off) |
			     (*dst & GENMASK(dst_bit_off - 1, 0));
	bits_in_src_buffer += dst_bit_off;

	/*
	 * Keep most significant bits from dst if we end up with an unaligned
	 * number of bits.
	 */
	nbytes = bits_in_src_buffer / 8;
	if (bits_in_src_buffer % 8) {
		src_buffer |= (dst[nbytes] &
			       GENMASK(7, bits_in_src_buffer % 8)) <<
			      (nbytes * 8);
		nbytes++;
	}

	/* Copy the remaining bytes to dst */
	for (i = 0; i < nbytes; i++) {
		dst[i] = src_buffer;
		src_buffer >>= 8;
	}
}