/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
  14. #include <linux/bitfield.h>
  15. #include <linux/completion.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/io.h>
  19. #include <linux/module.h>
  20. #include <linux/mtd/mtd.h>
  21. #include <linux/mtd/rawnand.h>
  22. #include <linux/slab.h>
  23. #include <linux/spinlock.h>
  24. #include "denali.h"
  25. MODULE_LICENSE("GPL");
  26. #define DENALI_NAND_NAME "denali-nand"
  27. /* for Indexed Addressing */
  28. #define DENALI_INDEXED_CTRL 0x00
  29. #define DENALI_INDEXED_DATA 0x10
  30. #define DENALI_MAP00 (0 << 26) /* direct access to buffer */
  31. #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
  32. #define DENALI_MAP10 (2 << 26) /* high-level control plane */
  33. #define DENALI_MAP11 (3 << 26) /* direct controller access */
  34. /* MAP11 access cycle type */
  35. #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
  36. #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
  37. #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
  38. /* MAP10 commands */
  39. #define DENALI_ERASE 0x01
  40. #define DENALI_BANK(denali) ((denali)->active_bank << 24)
  41. #define DENALI_INVALID_BANK -1
  42. #define DENALI_NR_BANKS 4
  43. static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
  44. {
  45. return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
  46. }
  47. /*
  48. * Direct Addressing - the slave address forms the control information (command
  49. * type, bank, block, and page address). The slave data is the actual data to
  50. * be transferred. This mode requires 28 bits of address region allocated.
  51. */
  52. static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
  53. {
  54. return ioread32(denali->host + addr);
  55. }
  56. static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
  57. u32 data)
  58. {
  59. iowrite32(data, denali->host + addr);
  60. }
  61. /*
  62. * Indexed Addressing - address translation module intervenes in passing the
  63. * control information. This mode reduces the required address range. The
  64. * control information and transferred data are latched by the registers in
  65. * the translation module.
  66. */
  67. static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
  68. {
  69. iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
  70. return ioread32(denali->host + DENALI_INDEXED_DATA);
  71. }
  72. static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
  73. u32 data)
  74. {
  75. iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
  76. iowrite32(data, denali->host + DENALI_INDEXED_DATA);
  77. }
  78. /*
  79. * Use the configuration feature register to determine the maximum number of
  80. * banks that the hardware supports.
  81. */
  82. static void denali_detect_max_banks(struct denali_nand_info *denali)
  83. {
  84. uint32_t features = ioread32(denali->reg + FEATURES);
  85. denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
  86. /* the encoding changed from rev 5.0 to 5.1 */
  87. if (denali->revision < 0x0501)
  88. denali->max_banks <<= 1;
  89. }
  90. static void denali_enable_irq(struct denali_nand_info *denali)
  91. {
  92. int i;
  93. for (i = 0; i < DENALI_NR_BANKS; i++)
  94. iowrite32(U32_MAX, denali->reg + INTR_EN(i));
  95. iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
  96. }
  97. static void denali_disable_irq(struct denali_nand_info *denali)
  98. {
  99. int i;
  100. for (i = 0; i < DENALI_NR_BANKS; i++)
  101. iowrite32(0, denali->reg + INTR_EN(i));
  102. iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
  103. }
  104. static void denali_clear_irq(struct denali_nand_info *denali,
  105. int bank, uint32_t irq_status)
  106. {
  107. /* write one to clear bits */
  108. iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
  109. }
  110. static void denali_clear_irq_all(struct denali_nand_info *denali)
  111. {
  112. int i;
  113. for (i = 0; i < DENALI_NR_BANKS; i++)
  114. denali_clear_irq(denali, i, U32_MAX);
  115. }
/*
 * Interrupt handler: acknowledge every pending bank interrupt and
 * accumulate the status bits of the currently selected bank into
 * denali->irq_status for denali_wait_for_irq() to consume.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		/* ack (write-one-to-clear) what we just read */
		denali_clear_irq(denali, i, irq_status);

		/* only the active bank participates in the current transfer */
		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		/* wake the waiter once any bit it asked for is set */
		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}
/*
 * Discard any stale accumulated IRQ status and mask.  Must be called
 * before kicking off a hardware operation whose completion will be
 * waited for with denali_wait_for_irq().
 */
static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}
/*
 * Sleep until any bit of irq_mask appears in denali->irq_status (which
 * the ISR accumulates), or until the 1-second timeout expires.
 *
 * Returns the accumulated IRQ status, or 0 on timeout.  Callers are
 * expected to have called denali_reset_irq() before starting the
 * operation being waited for.
 */
static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	/* publish the mask under the lock so the ISR knows when to complete() */
	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}
  169. static uint32_t denali_check_irq(struct denali_nand_info *denali)
  170. {
  171. unsigned long flags;
  172. uint32_t irq_status;
  173. spin_lock_irqsave(&denali->irq_lock, flags);
  174. irq_status = denali->irq_status;
  175. spin_unlock_irqrestore(&denali->irq_lock, flags);
  176. return irq_status;
  177. }
  178. static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  179. {
  180. struct denali_nand_info *denali = mtd_to_denali(mtd);
  181. u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
  182. int i;
  183. for (i = 0; i < len; i++)
  184. buf[i] = denali->host_read(denali, addr);
  185. }
  186. static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
  187. {
  188. struct denali_nand_info *denali = mtd_to_denali(mtd);
  189. u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
  190. int i;
  191. for (i = 0; i < len; i++)
  192. denali->host_write(denali, addr, buf[i]);
  193. }
  194. static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
  195. {
  196. struct denali_nand_info *denali = mtd_to_denali(mtd);
  197. u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
  198. uint16_t *buf16 = (uint16_t *)buf;
  199. int i;
  200. for (i = 0; i < len / 2; i++)
  201. buf16[i] = denali->host_read(denali, addr);
  202. }
  203. static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
  204. int len)
  205. {
  206. struct denali_nand_info *denali = mtd_to_denali(mtd);
  207. u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
  208. const uint16_t *buf16 = (const uint16_t *)buf;
  209. int i;
  210. for (i = 0; i < len / 2; i++)
  211. denali->host_write(denali, addr, buf16[i]);
  212. }
  213. static uint8_t denali_read_byte(struct mtd_info *mtd)
  214. {
  215. uint8_t byte;
  216. denali_read_buf(mtd, &byte, 1);
  217. return byte;
  218. }
  219. static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
  220. {
  221. denali_write_buf(mtd, &byte, 1);
  222. }
  223. static uint16_t denali_read_word(struct mtd_info *mtd)
  224. {
  225. uint16_t word;
  226. denali_read_buf16(mtd, (uint8_t *)&word, 2);
  227. return word;
  228. }
  229. static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
  230. {
  231. struct denali_nand_info *denali = mtd_to_denali(mtd);
  232. uint32_t type;
  233. if (ctrl & NAND_CLE)
  234. type = DENALI_MAP11_CMD;
  235. else if (ctrl & NAND_ALE)
  236. type = DENALI_MAP11_ADDR;
  237. else
  238. return;
  239. /*
  240. * Some commands are followed by chip->dev_ready or chip->waitfunc.
  241. * irq_status must be cleared here to catch the R/B# interrupt later.
  242. */
  243. if (ctrl & NAND_CTRL_CHANGE)
  244. denali_reset_irq(denali);
  245. denali->host_write(denali, DENALI_BANK(denali) | type, dat);
  246. }
  247. static int denali_dev_ready(struct mtd_info *mtd)
  248. {
  249. struct denali_nand_info *denali = mtd_to_denali(mtd);
  250. return !!(denali_check_irq(denali) & INTR__INT_ACT);
  251. }
  252. static int denali_check_erased_page(struct mtd_info *mtd,
  253. struct nand_chip *chip, uint8_t *buf,
  254. unsigned long uncor_ecc_flags,
  255. unsigned int max_bitflips)
  256. {
  257. struct denali_nand_info *denali = mtd_to_denali(mtd);
  258. uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
  259. int ecc_steps = chip->ecc.steps;
  260. int ecc_size = chip->ecc.size;
  261. int ecc_bytes = chip->ecc.bytes;
  262. int i, stat;
  263. for (i = 0; i < ecc_steps; i++) {
  264. if (!(uncor_ecc_flags & BIT(i)))
  265. continue;
  266. stat = nand_check_erased_ecc_chunk(buf, ecc_size,
  267. ecc_code, ecc_bytes,
  268. NULL, 0,
  269. chip->ecc.strength);
  270. if (stat < 0) {
  271. mtd->ecc_stats.failed++;
  272. } else {
  273. mtd->ecc_stats.corrected += stat;
  274. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  275. }
  276. buf += ecc_size;
  277. ecc_code += ecc_bytes;
  278. }
  279. return max_bitflips;
  280. }
/*
 * Evaluate the hardware ECC correction result for the active bank.
 *
 * Returns the maximum number of corrected bitflips per sector, or 0 with
 * *uncor_ecc_flags set for every sector when the controller reported an
 * uncorrectable error (the caller then runs the erased-page check).
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	/* each bank's result occupies its own byte lane of the register */
	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector.  We can not know "how many sectors", or
		 * "which sector(s)".  We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page.  Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
/*
 * Walk the controller's reported ECC errors and correct them in the data
 * buffer by software.  Each iteration reads an ECC_ERROR_ADDRESS /
 * ERR_CORRECTION_INFO pair describing one error; the LAST_ERR bit marks
 * the final entry.  Sectors with uncorrectable errors are flagged in
 * *uncor_ecc_flags for the later erased-page check.
 *
 * Returns the maximum bitflip count seen in any single sector, or -EIO
 * if the ECC_TRANSACTION_DONE interrupt never arrives.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is at or beyond ecc_size, the error hit
			 * the OOB area, so no correction is needed here.
			 * err_device identifies which NAND device (when more
			 * than one is connected in parallel) holds the
			 * erroneous bits.
			 */
			int offset;
			unsigned int flips_in_byte;

			/* byte position within the interleaved device stream */
			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once handle all ECC errors, controller will trigger an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
/*
 * Program a MAP10 DMA descriptor on controllers with 64-bit DMA support.
 * The whole transfer is described by three writes to the same MAP10
 * address (mode).
 */
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
/*
 * Program a MAP10 DMA descriptor on controllers limited to 32-bit DMA.
 * Unlike the 64-bit variant, pieces of the DMA address travel in the
 * index portion of each MAP10 write.
 */
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
  406. static int denali_pio_read(struct denali_nand_info *denali, void *buf,
  407. size_t size, int page, int raw)
  408. {
  409. u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
  410. uint32_t *buf32 = (uint32_t *)buf;
  411. uint32_t irq_status, ecc_err_mask;
  412. int i;
  413. if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
  414. ecc_err_mask = INTR__ECC_UNCOR_ERR;
  415. else
  416. ecc_err_mask = INTR__ECC_ERR;
  417. denali_reset_irq(denali);
  418. for (i = 0; i < size / 4; i++)
  419. *buf32++ = denali->host_read(denali, addr);
  420. irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
  421. if (!(irq_status & INTR__PAGE_XFER_INC))
  422. return -EIO;
  423. if (irq_status & INTR__ERASED_PAGE)
  424. memset(buf, 0xff, size);
  425. return irq_status & ecc_err_mask ? -EBADMSG : 0;
  426. }
  427. static int denali_pio_write(struct denali_nand_info *denali,
  428. const void *buf, size_t size, int page, int raw)
  429. {
  430. u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
  431. const uint32_t *buf32 = (uint32_t *)buf;
  432. uint32_t irq_status;
  433. int i;
  434. denali_reset_irq(denali);
  435. for (i = 0; i < size / 4; i++)
  436. denali->host_write(denali, addr, *buf32++);
  437. irq_status = denali_wait_for_irq(denali,
  438. INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
  439. if (!(irq_status & INTR__PROGRAM_COMP))
  440. return -EIO;
  441. return 0;
  442. }
  443. static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
  444. size_t size, int page, int raw, int write)
  445. {
  446. if (write)
  447. return denali_pio_write(denali, buf, size, page, raw);
  448. else
  449. return denali_pio_read(denali, buf, size, page, raw);
  450. }
/*
 * Transfer one page by DMA, falling back to PIO when the buffer cannot
 * be DMA-mapped.  Returns 0 on success, -EIO when the DMA command does
 * not complete, or -EBADMSG when an ECC error was flagged on a read.
 */
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead.  This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);

	/* clear stale IRQ state before kicking the DMA descriptor */
	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	/* pages flagged as erased are reported as all-0xff data */
	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
  492. static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
  493. size_t size, int page, int raw, int write)
  494. {
  495. iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
  496. iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
  497. denali->reg + TRANSFER_SPARE_REG);
  498. if (denali->dma_avail)
  499. return denali_dma_xfer(denali, buf, size, page, raw, write);
  500. else
  501. return denali_pio_xfer(denali, buf, size, page, raw, write);
  502. }
/*
 * Transfer the whole OOB area between chip->oob_poi and the device, in
 * either direction.  On flash, the Denali controller interleaves payload
 * and ECC (each ecc_size payload chunk followed by ecc_bytes of ECC) and
 * reserves the first oob_skip bytes after writesize for the bad-block
 * marker.  This helper walks that layout with column-change operations.
 */
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	if (write)
		nand_prog_page_begin_op(chip, page, writesize, bufpoi,
					oob_skip);
	else
		nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		/* chunks at/after writesize are shifted by the BBM area */
		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			/* this chunk straddles the end of the data area */
			len = writesize - pos;

		if (write)
			nand_change_write_column_op(chip, pos, bufpoi, len,
						    false);
		else
			nand_change_read_column_op(chip, pos, bufpoi, len,
						   false);
		bufpoi += len;

		/* transfer the straddling remainder from after the BBM */
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			if (write)
				nand_change_write_column_op(chip, writesize +
							    oob_skip, bufpoi,
							    len, false);
			else
				nand_change_read_column_op(chip, writesize +
							   oob_skip, bufpoi,
							   len, false);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	if (write)
		nand_change_write_column_op(chip, size - len, bufpoi, len,
					    false);
	else
		nand_change_read_column_op(chip, size - len, bufpoi, len,
					   false);
}
/*
 * Raw page read: fetch the whole page (data + spare, ECC engine off) into
 * denali->buf, then de-interleave it from the controller's on-flash
 * layout (payload/ECC chunks separated by the BBM area) into the
 * conventional "data then OOB" view expected by the MTD layer.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* chunks at/after writesize are shifted by the BBM */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				/* remainder of a straddling payload chunk */
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				/* remainder of a straddling ECC chunk */
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}
  623. static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
  624. int page)
  625. {
  626. denali_oob_xfer(mtd, chip, page, 0);
  627. return 0;
  628. }
  629. static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
  630. int page)
  631. {
  632. struct denali_nand_info *denali = mtd_to_denali(mtd);
  633. denali_reset_irq(denali);
  634. denali_oob_xfer(mtd, chip, page, 1);
  635. return nand_prog_page_end_op(chip);
  636. }
/*
 * ECC-corrected page read.  Transfers the data area with the ECC engine
 * enabled, then applies either the hardware fixup (statistics readout)
 * or the software fixup (error register walk).  Sectors flagged as
 * uncorrectable are re-examined in case they are merely erased.
 * Returns the maximum bitflip count, or a negative error code.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* OOB ECC bytes are needed for the erased-page re-check */
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
/*
 * Raw page write: interleave the caller's "data then OOB" view into the
 * controller's on-flash layout (payload/ECC chunks, BBM area) inside
 * denali->buf, then program the whole page with the ECC engine off.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* chunks at/after writesize are shifted by the BBM */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				/* remainder of a straddling payload chunk */
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				/* remainder of a straddling ECC chunk */
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
  728. static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  729. const uint8_t *buf, int oob_required, int page)
  730. {
  731. struct denali_nand_info *denali = mtd_to_denali(mtd);
  732. return denali_data_xfer(denali, (void *)buf, mtd->writesize,
  733. page, 0, 1);
  734. }
  735. static void denali_select_chip(struct mtd_info *mtd, int chip)
  736. {
  737. struct denali_nand_info *denali = mtd_to_denali(mtd);
  738. denali->active_bank = chip;
  739. }
  740. static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
  741. {
  742. struct denali_nand_info *denali = mtd_to_denali(mtd);
  743. uint32_t irq_status;
  744. /* R/B# pin transitioned from low to high? */
  745. irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
  746. return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
  747. }
  748. static int denali_erase(struct mtd_info *mtd, int page)
  749. {
  750. struct denali_nand_info *denali = mtd_to_denali(mtd);
  751. uint32_t irq_status;
  752. denali_reset_irq(denali);
  753. denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
  754. DENALI_ERASE);
  755. /* wait for erase to complete or failure to occur */
  756. irq_status = denali_wait_for_irq(denali,
  757. INTR__ERASE_COMP | INTR__ERASE_FAIL);
  758. return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
  759. }
/*
 * Translate the generic SDR NAND timings in @conf into the Denali
 * controller's timing registers.
 *
 * Each register is updated read-modify-write: only the relevant field is
 * replaced, and every computed clock count is clamped to the field's
 * maximum (the mask value doubles as the upper bound).
 *
 * Returns 0 on success, -EINVAL for an unusable clock configuration, or
 * the PTR_ERR of nand_get_sdr_timings() for a non-SDR interface.
 */
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core clock.
	 * The clk_x is an integral multiple N of the core clk. The value N is
	 * configured at IP delivery time, and its available value is 4, 5, 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	/* Timings validated; nothing to program in check-only mode. */
	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/*
	 * tRP, tWP -> RDWR_EN_LO_CNT
	 *
	 * The low phase must also be long enough that low + high covers a
	 * full read/write cycle (tRC/tWC), and never shorter than mult_x.
	 */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT (extra setup beyond what lo/acc give) */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}
  861. static void denali_reset_banks(struct denali_nand_info *denali)
  862. {
  863. u32 irq_status;
  864. int i;
  865. for (i = 0; i < denali->max_banks; i++) {
  866. denali->active_bank = i;
  867. denali_reset_irq(denali);
  868. iowrite32(DEVICE_RESET__BANK(i),
  869. denali->reg + DEVICE_RESET);
  870. irq_status = denali_wait_for_irq(denali,
  871. INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
  872. if (!(irq_status & INTR__INT_ACT))
  873. break;
  874. }
  875. dev_dbg(denali->dev, "%d chips connected\n", i);
  876. denali->max_banks = i;
  877. }
/*
 * One-time controller setup performed before any NAND access: capture the
 * IP revision and OOB skip count, detect the bank count, and program the
 * global enables/markers.
 */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * tell driver how many bit controller will skip before
	 * writing ECC code in OOB, this register may be already
	 * set by firmware. So we read this value out.
	 * if this value is 0, just let it be.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	denali_detect_max_banks(denali);
	/* enable R/B# monitoring on all banks; 0x0F = all four pins */
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	/* marker value the controller writes into erased spare areas */
	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}
  898. int denali_calc_ecc_bytes(int step_size, int strength)
  899. {
  900. /* BCH code. Denali requires ecc.bytes to be multiple of 2 */
  901. return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
  902. }
  903. EXPORT_SYMBOL(denali_calc_ecc_bytes);
  904. static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
  905. struct mtd_oob_region *oobregion)
  906. {
  907. struct denali_nand_info *denali = mtd_to_denali(mtd);
  908. struct nand_chip *chip = mtd_to_nand(mtd);
  909. if (section)
  910. return -ERANGE;
  911. oobregion->offset = denali->oob_skip_bytes;
  912. oobregion->length = chip->ecc.total;
  913. return 0;
  914. }
  915. static int denali_ooblayout_free(struct mtd_info *mtd, int section,
  916. struct mtd_oob_region *oobregion)
  917. {
  918. struct denali_nand_info *denali = mtd_to_denali(mtd);
  919. struct nand_chip *chip = mtd_to_nand(mtd);
  920. if (section)
  921. return -ERANGE;
  922. oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
  923. oobregion->length = mtd->oobsize - oobregion->offset;
  924. return 0;
  925. }
/* Single-region OOB layout: ECC after the BBM skip bytes, free area last. */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
/*
 * Adjust mtd/nand geometry when two x8 chips are wired in parallel on one
 * chip select (DEVICES_CONNECTED == 2): every per-page quantity doubles
 * from the core framework's point of view.
 *
 * Returns 0 on success, -EINVAL for any DEVICES_CONNECTED value other
 * than 1 or 2.
 */
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	/* single device: nothing to fix up */
	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
/*
 * ->attach_chip hook, called by nand_scan() after chip identification:
 * probe DMA capability, choose the ECC configuration, program the
 * controller's geometry registers, wire up the page/OOB accessors, apply
 * the multi-device fixup, and allocate the raw-transfer buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int denali_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int ret;

	/* DMA engine is only usable if the IP was configured with it */
	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			/* fall back to PIO rather than failing the probe */
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	/* pick step size/strength that fit the usable OOB space */
	ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
				   mtd->oobsize - denali->oob_skip_bytes);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		return ret;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		return ret;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf)
		return -ENOMEM;

	return 0;
}
  1055. static void denali_detach_chip(struct nand_chip *chip)
  1056. {
  1057. struct mtd_info *mtd = nand_to_mtd(chip);
  1058. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1059. kfree(denali->buf);
  1060. }
/* Controller hooks invoked by nand_scan() around chip identification. */
static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.detach_chip = denali_detach_chip,
};
/*
 * Common probe entry for the platform glue drivers: initialize the
 * hardware and IRQ handling, populate the nand_chip callbacks, run
 * nand_scan(), and register the MTD device.
 *
 * On failure everything acquired here is unwound via the goto chain.
 * Returns 0 on success or a negative errno.
 */
int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	/* clear stale interrupts before the handler is installed */
	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fallback to the default name if DT did not give "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	/* indexed vs direct host access depends on the IP configuration */
	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_rate && denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	chip->dummy_controller.ops = &denali_controller_ops;
	ret = nand_scan(mtd, denali->max_banks);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);
  1123. void denali_remove(struct denali_nand_info *denali)
  1124. {
  1125. struct mtd_info *mtd = nand_to_mtd(&denali->nand);
  1126. nand_release(mtd);
  1127. denali_disable_irq(denali);
  1128. }
  1129. EXPORT_SYMBOL(denali_remove);