/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

MODULE_LICENSE("GPL");

#define DENALI_NAND_NAME	"denali-nand"

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */

/* MAP10 commands */
#define DENALI_ERASE		0x01

#define DENALI_BANK(denali)	((denali)->active_bank << 24)

#define DENALI_INVALID_BANK	-1
#define DENALI_NR_BANKS		4
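
/*
 * A host bus address in this driver is composed from the pieces above:
 * bits [27:26] select the access mode (MAP00..MAP11), the bank number is
 * placed at bit 24 via DENALI_BANK(), and the low bits hold the
 * mode-specific payload (a page number for MAP01/MAP10, or the cycle type
 * for MAP11).
 */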

/*
 * The bus interface clock, clk_x, is phase aligned with the core clock. The
 * clk_x is an integral multiple N of the core clk. The value N is configured
 * at IP delivery time, and its available value is 4, 5, or 6. We need to
 * align to the largest value to make it work with any possible configuration.
 */
#define DENALI_CLK_X_MULT	6

static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * Direct Addressing - the slave address forms the control information
 * (command type, bank, block, and page address). The slave data is the
 * actual data to be transferred. This mode requires a 28-bit address region.
 */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}

/*
 * Indexed Addressing - the address translation module intervenes in passing
 * the control information. This mode reduces the required address range. The
 * control information and transferred data are latched by the registers in
 * the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
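
/*
 * For illustration, a MAP11 command cycle sent to the active bank through
 * indexed addressing boils down to two MMIO writes; this is what
 * denali_indexed_write() performs when invoked via ->host_write() from
 * denali_cmd_ctrl() below:
 *
 *	iowrite32(DENALI_MAP11_CMD | DENALI_BANK(denali),
 *		  denali->host + DENALI_INDEXED_CTRL);
 *	iowrite32(command_byte, denali->host + DENALI_INDEXED_DATA);
 */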

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}

static void denali_enable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(U32_MAX, denali->reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(0, denali->reg + INTR_EN(i));
	iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}
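
/*
 * Note that the ISR acknowledges events on every bank, but accumulates only
 * those of the currently active bank into ->irq_status; stray interrupts
 * from other banks are simply cleared and dropped.
 */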

static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}

static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}
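
/*
 * The pattern used throughout this file is: call denali_reset_irq() before
 * kicking off an operation, then denali_wait_for_irq() with the completion
 * and failure bits of interest. Events that arrive in between are caught by
 * the "already happened" fast path above.
 */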

static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	unsigned long flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);
	irq_status = denali->irq_status;
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	return irq_status;
}

static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		buf[i] = denali->host_read(denali, addr);
}

static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		denali->host_write(denali, addr, buf[i]);
}

static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	uint16_t *buf16 = (uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		buf16[i] = denali->host_read(denali, addr);
}

static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	const uint16_t *buf16 = (const uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		denali->host_write(denali, addr, buf16[i]);
}

static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	uint8_t byte;

	denali_read_buf(mtd, &byte, 1);

	return byte;
}

static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}

static uint16_t denali_read_word(struct mtd_info *mtd)
{
	uint16_t word;

	denali_read_buf16(mtd, (uint8_t *)&word, 2);

	return word;
}

static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}

static int denali_dev_ready(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return !!(denali_check_irq(denali) & INTR__INT_ACT);
}

static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, stat;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}

static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector. We can not know how many sectors,
		 * nor which sector(s), so the erased-page check is needed
		 * for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page()
	 * callback. Unfortunately, we can not know the total number of
	 * corrected bits in the page, so increase the stats by max_bitflips
	 * as a compromise.
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}

static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so we ignore it; there
			 * is no need to correct it. err_device identifies
			 * which NAND device the error bits occurred on when
			 * more than one NAND is connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once all ECC errors are handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}

static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
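
/*
 * To make the encoding above concrete: for a single-page write (write = 1,
 * page_count = 1), the first MAP10 data word works out to
 * 0x01002000 | 0x00400000 | 0x100 | 0x1 = 0x01402101.
 */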

static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}

static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali,
			    const void *buf, size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	const uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, *buf32++);

	irq_status = denali_wait_for_irq(denali,
				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page, raw);
	else
		return denali_pio_read(denali, buf, size, page, raw);
}

static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}

static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
			    size_t size, int page, int raw, int write)
{
	iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
		  denali->reg + TRANSFER_SPARE_REG);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, raw, write);
	else
		return denali_pio_xfer(denali, buf, size, page, raw, write);
}

static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	if (write)
		nand_prog_page_begin_op(chip, page, writesize, bufpoi,
					oob_skip);
	else
		nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		if (write)
			nand_change_write_column_op(chip, pos, bufpoi, len,
						    false);
		else
			nand_change_read_column_op(chip, pos, bufpoi, len,
						   false);
		bufpoi += len;
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			if (write)
				nand_change_write_column_op(chip, writesize +
							    oob_skip, bufpoi,
							    len, false);
			else
				nand_change_read_column_op(chip, writesize +
							   oob_skip, bufpoi,
							   len, false);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	if (write)
		nand_change_write_column_op(chip, size - len, bufpoi, len,
					    false);
	else
		nand_change_read_column_op(chip, size - len, bufpoi, len,
					   false);
}
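
/*
 * As the helpers above and below imply, the page as stored in flash uses a
 * syndrome layout: the main area holds an interleaved stream of payload and
 * ECC chunks (payload0, ECC0, payload1, ECC1, ...), the spare area starts
 * with oob_skip_bytes reserved for the BBM, then carries whatever part of
 * the stream spills past the writesize boundary, and the remaining OOB free
 * bytes sit at the very end. The raw accessors rearrange between this
 * physical layout and the mtd view (payload first, then OOB).
 */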

static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}

static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}

static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	return nand_prog_page_end_op(chip);
}

static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}

static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required,
				 int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first, except for a full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}

static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			     const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return denali_data_xfer(denali, (void *)buf, mtd->writesize,
				page, 0, 1);
}

static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali->active_bank = chip;
}

static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	/* R/B# pin transitioned from low to high? */
	irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

	return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	denali_reset_irq(denali);

	denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
			   DENALI_ERASE);

	/* wait for erase to complete or failure to occur */
	irq_status = denali_wait_for_irq(denali,
					 INTR__ERASE_COMP | INTR__ERASE_FAIL);

	return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}

static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_clk;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_clk)
		return -EINVAL;
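
	/* for example, a 200 MHz clk_x gives t_clk = 5000 ps */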

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
			       t_clk);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_clk);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
				  t_clk);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_clk);
	rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}

static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->active_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}

static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * Tell the driver how many bytes the controller will skip before
	 * writing ECC code in the OOB area. This register may already have
	 * been set by firmware, so read the value out. If it is 0, just
	 * leave it as is.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	denali_detect_max_banks(denali);
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}

int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
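
/*
 * Worked example: for a 512-byte ECC step at strength 8,
 * fls(512 * 8) = fls(4096) = 13, so the result is
 * DIV_ROUND_UP(8 * 13, 16) * 2 = 7 * 2 = 14 ECC bytes per step.
 */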

static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
			    struct denali_nand_info *denali)
{
	int oobavail = mtd->oobsize - denali->oob_skip_bytes;
	int ret;

	/*
	 * If .size and .strength are already set (usually by DT),
	 * check if they are supported by this controller.
	 */
	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);

	/*
	 * We want .size and .strength closest to the chip's requirement
	 * unless NAND_ECC_MAXIMIZE is requested.
	 */
	if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
		ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
		if (!ret)
			return 0;
	}

	/* Maximizing the ECC strength is the last resort */
	return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
}

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};

static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi-device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected and is left
	 * at 0. Set it to 1 in that case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
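
/*
 * For example, two 2 KiB-page x8 chips behind one chip select are presented
 * to the MTD core as a single x16 device with a 4 KiB logical page.
 */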

int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fall back to the default name if DT did not give a "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	ret = nand_scan_ident(mtd, denali->max_banks, NULL);
	if (ret)
		goto disable_irq;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	}

	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = denali_ecc_setup(mtd, chip, denali);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		goto disable_irq;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		goto disable_irq;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf) {
		ret = -ENOMEM;
		goto disable_irq;
	}

	ret = nand_scan_tail(mtd);
	if (ret)
		goto free_buf;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
free_buf:
	kfree(denali->buf);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);
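
/*
 * A minimal usage sketch, assuming a platform glue driver (e.g. a DT or PCI
 * front end) as the caller: judging from the fields used in this file, it
 * is expected to fill in at least ->dev, ->irq and the ->reg/->host MMIO
 * mappings, plus ->ecc_caps, and optionally ->clk_x_rate, ->caps and
 * ->revision, before calling denali_init(), and to call denali_remove() on
 * teardown.
 */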

void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);

	nand_release(mtd);
	kfree(denali->buf);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);