denali.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453
  1. /*
  2. * NAND Flash Controller Device Driver
  3. * Copyright © 2009-2010, Intel Corporation and its suppliers.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc.,
  16. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17. *
  18. */
  19. #include <linux/interrupt.h>
  20. #include <linux/delay.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/wait.h>
  23. #include <linux/mutex.h>
  24. #include <linux/mtd/mtd.h>
  25. #include <linux/module.h>
  26. #include <linux/slab.h>
  27. #include "denali.h"
  28. MODULE_LICENSE("GPL");
  29. #define DENALI_NAND_NAME "denali-nand"
  30. /* Host Data/Command Interface */
  31. #define DENALI_HOST_ADDR 0x00
  32. #define DENALI_HOST_DATA 0x10
  33. #define DENALI_MAP00 (0 << 26) /* direct access to buffer */
  34. #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
  35. #define DENALI_MAP10 (2 << 26) /* high-level control plane */
  36. #define DENALI_MAP11 (3 << 26) /* direct controller access */
  37. /* MAP11 access cycle type */
  38. #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
  39. #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
  40. #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
  41. /* MAP10 commands */
  42. #define DENALI_ERASE 0x01
  43. #define DENALI_BANK(denali) ((denali)->active_bank << 24)
  44. #define DENALI_INVALID_BANK -1
  45. #define DENALI_NR_BANKS 4
  46. /*
  47. * The bus interface clock, clk_x, is phase aligned with the core clock. The
  48. * clk_x is an integral multiple N of the core clk. The value N is configured
  49. * at IP delivery time, and its available value is 4, 5, or 6. We need to align
  50. * to the largest value to make it work with any possible configuration.
  51. */
  52. #define DENALI_CLK_X_MULT 6
  53. /*
  54. * this macro allows us to convert from an MTD structure to our own
  55. * device context (denali) structure.
  56. */
  57. static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
  58. {
  59. return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
  60. }
/*
 * Issue one host command: latch the target address, then the data word.
 * The address write must precede the data write — the controller decodes
 * the MAP type and bank from the address register.
 */
static void denali_host_write(struct denali_nand_info *denali,
			      uint32_t addr, uint32_t data)
{
	iowrite32(addr, denali->host + DENALI_HOST_ADDR);
	iowrite32(data, denali->host + DENALI_HOST_DATA);
}
/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	denali->max_banks = 1 << (features & FEATURES__N_BANKS);

	/*
	 * The encoding changed from rev 5.0 to 5.1: older IP reports half
	 * the actual bank count in this field, so double it.
	 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}
  79. static void denali_enable_irq(struct denali_nand_info *denali)
  80. {
  81. int i;
  82. for (i = 0; i < DENALI_NR_BANKS; i++)
  83. iowrite32(U32_MAX, denali->reg + INTR_EN(i));
  84. iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
  85. }
  86. static void denali_disable_irq(struct denali_nand_info *denali)
  87. {
  88. int i;
  89. for (i = 0; i < DENALI_NR_BANKS; i++)
  90. iowrite32(0, denali->reg + INTR_EN(i));
  91. iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
  92. }
/* Acknowledge the given events on one bank (status bits are write-1-to-clear). */
static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
  99. static void denali_clear_irq_all(struct denali_nand_info *denali)
  100. {
  101. int i;
  102. for (i = 0; i < DENALI_NR_BANKS; i++)
  103. denali_clear_irq(denali, i, U32_MAX);
  104. }
/*
 * Interrupt handler: collect and acknowledge events from all banks,
 * latch the ones belonging to the active bank, and wake any waiter
 * once an awaited event has arrived.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		/* ack immediately so the same event does not re-fire */
		denali_clear_irq(denali, i, irq_status);

		/* only the selected bank's events are of interest to waiters */
		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		/* wake denali_wait_for_irq() if an awaited bit is now set */
		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}
/*
 * Drop any previously latched events and the wait mask, so a subsequent
 * denali_wait_for_irq() only sees events raised after this point.
 */
static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}
/*
 * Wait (up to 1s) until at least one event in @irq_mask has been latched
 * by the ISR. Returns the accumulated irq_status, or 0 on timeout.
 */
static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	/*
	 * Publish the mask and re-arm the completion under the lock, so the
	 * ISR cannot complete a stale waiter between these two steps.
	 */
	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			denali->irq_mask);
		return 0;
	}

	return denali->irq_status;
}
/* Take a consistent snapshot of the latched irq_status without waiting. */
static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	unsigned long flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);
	irq_status = denali->irq_status;
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	return irq_status;
}
  167. /*
  168. * This helper function setups the registers for ECC and whether or not
  169. * the spare area will be transferred.
  170. */
  171. static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
  172. bool transfer_spare)
  173. {
  174. int ecc_en_flag, transfer_spare_flag;
  175. /* set ECC, transfer spare bits if needed */
  176. ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
  177. transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
  178. /* Enable spare area/ECC per user's request. */
  179. iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
  180. iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
  181. }
/*
 * Read @len bytes in 8-bit data cycles (MAP11) from the selected bank.
 * One address write selects data-cycle mode; each subsequent data-register
 * read returns one byte.
 */
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int i;

	iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
		  denali->host + DENALI_HOST_ADDR);

	for (i = 0; i < len; i++)
		buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
}
/* Write @len bytes in 8-bit data cycles (MAP11) to the selected bank. */
static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int i;

	iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
		  denali->host + DENALI_HOST_ADDR);

	for (i = 0; i < len; i++)
		iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
}
/*
 * 16-bit bus variant of denali_read_buf: each data-register read yields
 * one 16-bit word. Odd trailing bytes (len not a multiple of 2) are
 * not transferred.
 */
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint16_t *buf16 = (uint16_t *)buf;
	int i;

	iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
		  denali->host + DENALI_HOST_ADDR);

	for (i = 0; i < len / 2; i++)
		buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
}
/* 16-bit bus variant of denali_write_buf: one 16-bit word per data cycle. */
static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const uint16_t *buf16 = (const uint16_t *)buf;
	int i;

	iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
		  denali->host + DENALI_HOST_ADDR);

	for (i = 0; i < len / 2; i++)
		iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
}
/* ->read_byte() hook: fetch a single byte via the 8-bit data-cycle path. */
static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	uint8_t byte;

	denali_read_buf(mtd, &byte, 1);

	return byte;
}
/* ->write_byte() hook: emit a single byte via the 8-bit data-cycle path. */
static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}
/* ->read_word() hook: fetch one 16-bit word on a 16-bit bus. */
static uint16_t denali_read_word(struct mtd_info *mtd)
{
	uint16_t word;

	denali_read_buf16(mtd, (uint8_t *)&word, 2);

	return word;
}
/*
 * ->cmd_ctrl() hook: issue a raw command or address cycle (MAP11) to the
 * selected bank. Data cycles go through the read/write_buf hooks instead.
 */
static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;		/* neither CLE nor ALE: nothing to emit */

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali_host_write(denali, DENALI_BANK(denali) | type, dat);
}
  255. static int denali_dev_ready(struct mtd_info *mtd)
  256. {
  257. struct denali_nand_info *denali = mtd_to_denali(mtd);
  258. return !!(denali_check_irq(denali) & INTR__INT_ACT);
  259. }
/*
 * Re-examine ECC sectors flagged as uncorrectable: an all-0xff (erased)
 * sector with few bitflips is not a real failure. Updates ecc_stats and
 * returns the (possibly raised) max_bitflips.
 */
static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	uint8_t *ecc_code = chip->buffers->ecccode;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, ret, stat;

	/* pull the raw ECC bytes out of the OOB buffer for comparison */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; i < ecc_steps; i++) {
		/* only sectors the hardware reported as uncorrectable */
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			/* genuinely uncorrectable, not an erased sector */
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}
/*
 * Handle ECC results when the hardware corrects data in place
 * (DENALI_CAP_HW_ECC_FIXUP). Returns max per-sector bitflips, or 0 with
 * *uncor_ecc_flags set for every sector when an uncorrectable error hit.
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector. We can not know "how many sectors", or
		 * "which sector(s)". We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
/* Field extractors for ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO registers. */
#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
/*
 * Handle ECC results when the driver must apply corrections itself:
 * walk the hardware's error FIFO, XOR each reported correction value into
 * @buf, and mark uncorrectable sectors in *uncor_ecc_flags for a later
 * erased-page check. Returns max per-sector bitflips, or -EIO on timeout.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		/* each register read pops one entry from the error FIFO */
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = ECC_SECTOR(err_addr);
		err_byte = ECC_BYTE(err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
		err_device = ECC_ERR_DEVICE(err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so there is no need to
			 * correct it. err_device identifies which NAND device
			 * the error bits occurred on when more than one NAND
			 * is connected per chip select.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!ECC_LAST_ERR(err_cor_info));

	/*
	 * Once handle all ecc errors, controller will trigger a
	 * ECC_TRANSACTION_DONE interrupt, so here just wait for
	 * a while for this interrupt
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
/* programs the controller to either enable/disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
	iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
	/* read back to flush the posted write before proceeding */
	ioread32(denali->reg + DMA_ENABLE);
}
/* Program a single-page DMA transfer using the 64-bit address protocol. */
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali_host_write(denali, mode,
			  0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali_host_write(denali, mode, dma_addr);

	/* 3. set memory high address */
	denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
}
/* Program a single-page DMA transfer using the 32-bit address protocol. */
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali_host_write(denali, mode | page,
			  0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali_host_write(denali, mode | 0x14000, 0x2400);
}
  428. static void denali_setup_dma(struct denali_nand_info *denali,
  429. dma_addr_t dma_addr, int page, int write)
  430. {
  431. if (denali->caps & DENALI_CAP_DMA_64BIT)
  432. denali_setup_dma64(denali, dma_addr, page, write);
  433. else
  434. denali_setup_dma32(denali, dma_addr, page, write);
  435. }
/*
 * Read one page in PIO (MAP01) mode into @buf.
 * Returns 0 on success, -EIO if the transfer never completed, or
 * -EBADMSG on ECC error. An erased page is returned as all-0xff.
 */
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	uint32_t addr = DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	/* the error event differs between HW-fixup and SW-fixup controllers */
	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
	for (i = 0; i < size / 4; i++)
		*buf32++ = ioread32(denali->host + DENALI_HOST_DATA);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
  458. static int denali_pio_write(struct denali_nand_info *denali,
  459. const void *buf, size_t size, int page, int raw)
  460. {
  461. uint32_t addr = DENALI_BANK(denali) | page;
  462. const uint32_t *buf32 = (uint32_t *)buf;
  463. uint32_t irq_status;
  464. int i;
  465. denali_reset_irq(denali);
  466. iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
  467. for (i = 0; i < size / 4; i++)
  468. iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
  469. irq_status = denali_wait_for_irq(denali,
  470. INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
  471. if (!(irq_status & INTR__PROGRAM_COMP))
  472. return -EIO;
  473. return 0;
  474. }
  475. static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
  476. size_t size, int page, int raw, int write)
  477. {
  478. if (write)
  479. return denali_pio_write(denali, buf, size, page, raw);
  480. else
  481. return denali_pio_read(denali, buf, size, page, raw);
  482. }
/*
 * Transfer one page via DMA, falling back to PIO when the buffer cannot
 * be DMA-mapped. Returns 0, -EIO on transfer failure, or -EBADMSG on
 * ECC error (reads only).
 */
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	denali_enable_dma(denali, true);

	/* reset latched events before kicking off the transfer */
	denali_reset_irq(denali);
	denali_setup_dma(denali, dma_addr, page, write);

	/* wait for operation to complete */
	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	denali_enable_dma(denali, false);
	dma_unmap_single(denali->dev, dma_addr, size, dir);

	/* only after unmapping may the CPU touch the buffer again */
	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
  525. static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
  526. size_t size, int page, int raw, int write)
  527. {
  528. setup_ecc_for_xfer(denali, !raw, raw);
  529. if (denali->dma_avail)
  530. return denali_dma_xfer(denali, buf, size, page, raw, write);
  531. else
  532. return denali_pio_xfer(denali, buf, size, page, raw, write);
  533. }
/*
 * Read or write the OOB area of one page using raw command sequencing.
 * The controller stores data in syndrome order with oob_skip_bytes (BBM)
 * inserted at the start of the spare area, so the logical OOB must be
 * gathered/scattered from several on-flash positions:
 *   BBM bytes, then per-step ECC bytes (which may straddle the
 *   writesize boundary), then the remaining free OOB bytes.
 */
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
	unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	chip->cmdfunc(mtd, start_cmd, writesize, page);
	if (write)
		chip->write_buf(mtd, bufpoi, oob_skip);
	else
		chip->read_buf(mtd, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		/* ECC chunks past writesize shift by the BBM skip bytes */
		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		chip->cmdfunc(mtd, rnd_cmd, pos, -1);
		if (write)
			chip->write_buf(mtd, bufpoi, len);
		else
			chip->read_buf(mtd, bufpoi, len);
		bufpoi += len;

		/* chunk straddled writesize: finish it after the skip bytes */
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
			if (write)
				chip->write_buf(mtd, bufpoi, len);
			else
				chip->read_buf(mtd, bufpoi, len);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
	if (write)
		chip->write_buf(mtd, bufpoi, len);
	else
		chip->read_buf(mtd, bufpoi, len);
}
/*
 * ->read_page_raw() hook: transfer the whole page + OOB without ECC,
 * then unshuffle the controller's syndrome layout (payload/ECC interleaved,
 * BBM skip bytes at the start of the spare area) into the flat layout
 * expected by MTD (@buf payload, chip->oob_poi OOB).
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *dma_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* payload past writesize is shifted by the BBM skip */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, dma_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				/* remainder sits just after the skip bytes */
				len = ecc_size - len;
				memcpy(buf, dma_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, dma_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, dma_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, dma_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, dma_buf + size - len, len);
	}

	return 0;
}
/* ->read_oob() hook: fetch the OOB area of @page into chip->oob_poi. */
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}
/*
 * ->write_oob() hook: program chip->oob_poi into the OOB area of @page,
 * then issue PAGEPROG and poll chip status for a failure bit.
 */
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int status;

	/* clear latched events so waitfunc sees only this operation */
	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/*
 * ->read_page() hook: ECC-corrected page read. Applies the HW or SW ECC
 * fixup path and re-checks flagged sectors for the erased-page case.
 * Returns max bitflips (>= 0) or a negative error.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	/* -EBADMSG is handled below via the ECC fixup paths */
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* fetch raw ECC bytes for the erased-page re-check */
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
/*
 * ->write_page_raw() hook: shuffle the flat MTD layout (@buf payload,
 * chip->oob_poi OOB) into the controller's syndrome layout in denali->buf,
 * then transfer the whole page + OOB without ECC.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *dma_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(dma_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* payload past writesize is shifted by the BBM skip */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(dma_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				/* remainder goes just after the skip bytes */
				len = ecc_size - len;
				memcpy(dma_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(dma_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(dma_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(dma_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(dma_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
}
  759. static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  760. const uint8_t *buf, int oob_required, int page)
  761. {
  762. struct denali_nand_info *denali = mtd_to_denali(mtd);
  763. return denali_data_xfer(denali, (void *)buf, mtd->writesize,
  764. page, 0, 1);
  765. }
  766. static void denali_select_chip(struct mtd_info *mtd, int chip)
  767. {
  768. struct denali_nand_info *denali = mtd_to_denali(mtd);
  769. denali->active_bank = chip;
  770. }
  771. static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
  772. {
  773. struct denali_nand_info *denali = mtd_to_denali(mtd);
  774. uint32_t irq_status;
  775. /* R/B# pin transitioned from low to high? */
  776. irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
  777. return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
  778. }
  779. static int denali_erase(struct mtd_info *mtd, int page)
  780. {
  781. struct denali_nand_info *denali = mtd_to_denali(mtd);
  782. uint32_t irq_status;
  783. denali_reset_irq(denali);
  784. denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
  785. DENALI_ERASE);
  786. /* wait for erase to complete or failure to occur */
  787. irq_status = denali_wait_for_irq(denali,
  788. INTR__ERASE_COMP | INTR__ERASE_FAIL);
  789. return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
  790. }
/*
 * Program the Denali timing registers from the generic SDR timing set
 * @conf, quantizing each parameter to the controller clock (clk_x) period.
 * With chipnr == NAND_DATA_IFACE_CHECK_ONLY the timings are only validated
 * and the hardware is left untouched.
 */
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_clk;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_clk)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= acc_clks;
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= re_2_we;
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= re_2_re;
	iowrite32(tmp, denali->reg + RE_2_RE);

	/* tWHR -> WE_2_RE */
	we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= we_2_re;
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~addr_2_data_mask;
	tmp |= addr_2_data;
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_clk);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= rdwr_en_hi;
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
				  t_clk);
	/* the low phase must also satisfy the full cycle (tRC/tWC) budget */
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_clk);
	rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= rdwr_en_lo;
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= cs_setup;
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}
/*
 * Reset each bank in turn and trim max_banks down to the number of banks
 * that actually have a chip behind them (a responding chip raises
 * INTR__INT_ACT after the reset).
 */
static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->active_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		/* no INT_ACT: nothing answered on this bank, stop probing */
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}
/*
 * One-time controller setup: latch the IP revision and firmware-chosen
 * spare-area skip, then program the static configuration registers.
 */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * tell driver how many bit controller will skip before
	 * writing ECC code in OOB, this register may be already
	 * set by firmware. So we read this value out.
	 * if this value is 0, just let it be.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	detect_max_banks(denali);
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(1, denali->reg + ECC_ENABLE);
}
  920. int denali_calc_ecc_bytes(int step_size, int strength)
  921. {
  922. /* BCH code. Denali requires ecc.bytes to be multiple of 2 */
  923. return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
  924. }
  925. EXPORT_SYMBOL(denali_calc_ecc_bytes);
  926. static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
  927. struct denali_nand_info *denali)
  928. {
  929. int oobavail = mtd->oobsize - denali->oob_skip_bytes;
  930. int ret;
  931. /*
  932. * If .size and .strength are already set (usually by DT),
  933. * check if they are supported by this controller.
  934. */
  935. if (chip->ecc.size && chip->ecc.strength)
  936. return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
  937. /*
  938. * We want .size and .strength closest to the chip's requirement
  939. * unless NAND_ECC_MAXIMIZE is requested.
  940. */
  941. if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
  942. ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
  943. if (!ret)
  944. return 0;
  945. }
  946. /* Max ECC strength is the last thing we can do */
  947. return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
  948. }
  949. static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
  950. struct mtd_oob_region *oobregion)
  951. {
  952. struct denali_nand_info *denali = mtd_to_denali(mtd);
  953. struct nand_chip *chip = mtd_to_nand(mtd);
  954. if (section)
  955. return -ERANGE;
  956. oobregion->offset = denali->oob_skip_bytes;
  957. oobregion->length = chip->ecc.total;
  958. return 0;
  959. }
  960. static int denali_ooblayout_free(struct mtd_info *mtd, int section,
  961. struct mtd_oob_region *oobregion)
  962. {
  963. struct denali_nand_info *denali = mtd_to_denali(mtd);
  964. struct nand_chip *chip = mtd_to_nand(mtd);
  965. if (section)
  966. return -ERANGE;
  967. oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
  968. oobregion->length = mtd->oobsize - oobregion->offset;
  969. return 0;
  970. }
/* OOB layout callbacks: one ECC region after the BBM, free space after it. */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
  975. /* initialize driver data structures */
  976. static void denali_drv_init(struct denali_nand_info *denali)
  977. {
  978. /*
  979. * the completion object will be used to notify
  980. * the callee that the interrupt is done
  981. */
  982. init_completion(&denali->complete);
  983. /*
  984. * the spinlock will be used to synchronize the ISR with any
  985. * element that might be access shared data (interrupt status)
  986. */
  987. spin_lock_init(&denali->irq_lock);
  988. }
/*
 * Adjust the logical geometry when two x8 chips share one chip select.
 * The NAND core is unaware of this wiring, so every size/shift field it
 * uses must be doubled to describe the combined (logical) device.
 * Returns -EINVAL for any DEVICES_CONNECTED value other than 1 or 2.
 */
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
/*
 * Controller bring-up and NAND registration entry point.
 *
 * Initializes the hardware and driver state, requests the IRQ, performs the
 * two-stage NAND scan (ident + tail) with the chosen ECC configuration, and
 * registers the resulting MTD.  All failure paths unwind through the
 * free_buf/disable_irq labels.  Returns 0 on success, negative errno
 * otherwise.
 */
int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);
	denali_drv_init(denali);

	denali_clear_irq_all(denali);

	/* Request IRQ after all the hardware initialization is finished */
	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fallback to the default name if DT did not give "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	/* register the driver with the NAND core subsystem */
	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	/*
	 * scan for NAND devices attached to the controller
	 * this is the first stage in a two step process to register
	 * with the nand subsystem
	 */
	ret = nand_scan_ident(mtd, denali->max_banks, NULL);
	if (ret)
		goto disable_irq;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			/* not fatal: fall back to PIO transfers */
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
	}

	/*
	 * second stage of the NAND scan
	 * this stage requires information regarding ECC and
	 * bad block management.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;

	chip->ecc.mode = NAND_ECC_HW_SYNDROME;

	/* no subpage writes on denali */
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = denali_ecc_setup(mtd, chip, denali);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		goto disable_irq;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		goto disable_irq;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf) {
		ret = -ENOMEM;
		goto disable_irq;
	}

	ret = nand_scan_tail(mtd);
	if (ret)
		goto free_buf;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto free_buf;
	}
	return 0;

free_buf:
	kfree(denali->buf);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);
/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);

	/* unregister from the NAND/MTD core before freeing the DMA buffer */
	nand_release(mtd);
	kfree(denali->buf);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);