denali.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663
  1. /*
  2. * NAND Flash Controller Device Driver
  3. * Copyright © 2009-2010, Intel Corporation and its suppliers.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc.,
  16. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17. *
  18. */
  19. #include <linux/interrupt.h>
  20. #include <linux/delay.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/wait.h>
  23. #include <linux/mutex.h>
  24. #include <linux/mtd/mtd.h>
  25. #include <linux/module.h>
  26. #include "denali.h"
  27. MODULE_LICENSE("GPL");
  28. /*
  29. * We define a module parameter that allows the user to override
  30. * the hardware and decide what timing mode should be used.
  31. */
  32. #define NAND_DEFAULT_TIMINGS -1
  33. static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
  34. module_param(onfi_timing_mode, int, S_IRUGO);
  35. MODULE_PARM_DESC(onfi_timing_mode,
  36. "Overrides default ONFI setting. -1 indicates use default timings");
  37. #define DENALI_NAND_NAME "denali-nand"
  38. /*
  39. * We define a macro here that combines all interrupts this driver uses into
  40. * a single constant value, for convenience.
  41. */
  42. #define DENALI_IRQ_ALL (INTR__DMA_CMD_COMP | \
  43. INTR__ECC_TRANSACTION_DONE | \
  44. INTR__ECC_ERR | \
  45. INTR__PROGRAM_FAIL | \
  46. INTR__LOAD_COMP | \
  47. INTR__PROGRAM_COMP | \
  48. INTR__TIME_OUT | \
  49. INTR__ERASE_FAIL | \
  50. INTR__RST_COMP | \
  51. INTR__ERASE_COMP)
  52. /*
  53. * indicates whether or not the internal value for the flash bank is
  54. * valid or not
  55. */
  56. #define CHIP_SELECT_INVALID -1
  57. /*
  58. * This macro divides two integers and rounds fractional values up
  59. * to the nearest integer value.
  60. */
  61. #define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
  62. /*
  63. * this macro allows us to convert from an MTD structure to our own
  64. * device context (denali) structure.
  65. */
/* Map an mtd_info back to the denali context that embeds its nand_chip. */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	/* mtd_to_nand() gives the nand_chip; container_of() walks out to the wrapper */
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
  70. /*
  71. * These constants are defined by the driver to enable common driver
  72. * configuration options.
  73. */
  74. #define SPARE_ACCESS 0x41
  75. #define MAIN_ACCESS 0x42
  76. #define MAIN_SPARE_ACCESS 0x43
  77. #define DENALI_READ 0
  78. #define DENALI_WRITE 0x100
  79. /*
  80. * this is a helper macro that allows us to
  81. * format the bank into the proper bits for the controller
  82. */
  83. #define BANK(x) ((x) << 24)
  84. /* forward declarations */
  85. static void clear_interrupts(struct denali_nand_info *denali);
  86. static uint32_t wait_for_irq(struct denali_nand_info *denali,
  87. uint32_t irq_mask);
  88. static void denali_irq_enable(struct denali_nand_info *denali,
  89. uint32_t int_mask);
  90. static uint32_t read_interrupt_status(struct denali_nand_info *denali);
  91. /*
  92. * Certain operations for the denali NAND controller use an indexed mode to
  93. * read/write data. The operation is performed by writing the address value
  94. * of the command to the device memory followed by the data. This function
  95. * abstracts this common operation.
  96. */
  97. static void index_addr(struct denali_nand_info *denali,
  98. uint32_t address, uint32_t data)
  99. {
  100. iowrite32(address, denali->flash_mem);
  101. iowrite32(data, denali->flash_mem + 0x10);
  102. }
  103. /* Perform an indexed read of the device */
  104. static void index_addr_read_data(struct denali_nand_info *denali,
  105. uint32_t address, uint32_t *pdata)
  106. {
  107. iowrite32(address, denali->flash_mem);
  108. *pdata = ioread32(denali->flash_mem + 0x10);
  109. }
  110. /*
  111. * We need to buffer some data for some of the NAND core routines.
  112. * The operations manage buffering that data.
  113. */
  114. static void reset_buf(struct denali_nand_info *denali)
  115. {
  116. denali->buf.head = denali->buf.tail = 0;
  117. }
  118. static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
  119. {
  120. denali->buf.buf[denali->buf.tail++] = byte;
  121. }
  122. /* reads the status of the device */
  123. static void read_status(struct denali_nand_info *denali)
  124. {
  125. uint32_t cmd;
  126. /* initialize the data buffer to store status */
  127. reset_buf(denali);
  128. cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
  129. if (cmd)
  130. write_byte_to_buf(denali, NAND_STATUS_WP);
  131. else
  132. write_byte_to_buf(denali, 0);
  133. }
  134. /* resets a specific device connected to the core */
  135. static void reset_bank(struct denali_nand_info *denali)
  136. {
  137. uint32_t irq_status;
  138. uint32_t irq_mask = INTR__RST_COMP | INTR__TIME_OUT;
  139. clear_interrupts(denali);
  140. iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
  141. irq_status = wait_for_irq(denali, irq_mask);
  142. if (irq_status & INTR__TIME_OUT)
  143. dev_err(denali->dev, "reset bank failed.\n");
  144. }
  145. /* Reset the flash controller */
  146. static uint16_t denali_nand_reset(struct denali_nand_info *denali)
  147. {
  148. int i;
  149. for (i = 0; i < denali->max_banks; i++)
  150. iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
  151. denali->flash_reg + INTR_STATUS(i));
  152. for (i = 0; i < denali->max_banks; i++) {
  153. iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
  154. while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
  155. (INTR__RST_COMP | INTR__TIME_OUT)))
  156. cpu_relax();
  157. if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
  158. INTR__TIME_OUT)
  159. dev_dbg(denali->dev,
  160. "NAND Reset operation timed out on bank %d\n", i);
  161. }
  162. for (i = 0; i < denali->max_banks; i++)
  163. iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
  164. denali->flash_reg + INTR_STATUS(i));
  165. return PASS;
  166. }
  167. /*
  168. * this routine calculates the ONFI timing values for a given mode and
  169. * programs the clocking register accordingly. The mode is determined by
  170. * the get_onfi_nand_para routine.
  171. */
/*
 * Program the controller's clocking registers for the given ONFI timing
 * mode (0-5). The per-mode arrays hold ONFI minimum timings in ns; each
 * is converted to a count of CLK_X controller clocks before being
 * written to the corresponding register.
 */
static void nand_onfi_timing_set(struct denali_nand_info *denali,
				 uint16_t mode)
{
	/* ONFI minimum timing parameters in ns, indexed by timing mode */
	uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
	uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
	uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
	uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
	uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
	uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
	uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
	uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
	uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
	uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
	uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
	uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
	uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
	uint16_t dv_window = 0;
	uint16_t en_lo, en_hi;
	uint16_t acc_clks;
	uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

	/* RE#/WE# low and high pulse widths in clocks */
	en_lo = CEIL_DIV(Trp[mode], CLK_X);
	en_hi = CEIL_DIV(Treh[mode], CLK_X);
#if ONFI_BLOOM_TIME
	if ((en_hi * CLK_X) < (Treh[mode] + 2))
		en_hi++;
#endif

	/* stretch the low phase until the full cycle satisfies tRC */
	if ((en_lo + en_hi) * CLK_X < Trc[mode])
		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);

	/* the cycle must also span at least CLK_MULTI clocks */
	if ((en_lo + en_hi) < CLK_MULTI)
		en_lo += CLK_MULTI - en_lo - en_hi;

	/*
	 * widen en_lo until the data-valid window (time between data
	 * becoming valid after tREA and going invalid per tRHOH/tRLOH)
	 * is at least 8 ns
	 */
	while (dv_window < 8) {
		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];

		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];

		/* data is invalid at whichever limit is reached first */
		data_invalid = data_invalid_rhoh < data_invalid_rloh ?
			data_invalid_rhoh : data_invalid_rloh;

		dv_window = data_invalid - Trea[mode];

		if (dv_window < 8)
			en_lo++;
	}

	/* clocks of access latency: at least tREA plus 3 ns of margin */
	acc_clks = CEIL_DIV(Trea[mode], CLK_X);

	while (acc_clks * CLK_X - Trea[mode] < 3)
		acc_clks++;

	/* sampling point must land inside the data-valid window */
	if (data_invalid - acc_clks * CLK_X < 2)
		dev_warn(denali->dev, "%s, Line %d: Warning!\n",
			 __FILE__, __LINE__);

	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);

	/* chip-select setup: tCS minus the RE# low time, at least 1 clock */
	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
	if (cs_cnt == 0)
		cs_cnt = 1;

	/* also honour tCEA (CE# access time) when specified */
	if (Tcea[mode]) {
		while (cs_cnt * CLK_X + Trea[mode] < Tcea[mode])
			cs_cnt++;
	}

#if MODE5_WORKAROUND
	if (mode == 5)
		acc_clks = 5;
#endif

	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
	if (ioread32(denali->flash_reg + MANUFACTURER_ID) == 0 &&
	    ioread32(denali->flash_reg + DEVICE_ID) == 0x88)
		acc_clks = 6;

	/* commit the computed clock counts to the controller */
	iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
	iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
	iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
	iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
	iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
	iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
	iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
	iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
}
  245. /* queries the NAND device to see what ONFI modes it supports. */
  246. static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
  247. {
  248. int i;
  249. /*
  250. * we needn't to do a reset here because driver has already
  251. * reset all the banks before
  252. */
  253. if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
  254. ONFI_TIMING_MODE__VALUE))
  255. return FAIL;
  256. for (i = 5; i > 0; i--) {
  257. if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
  258. (0x01 << i))
  259. break;
  260. }
  261. nand_onfi_timing_set(denali, i);
  262. /*
  263. * By now, all the ONFI devices we know support the page cache
  264. * rw feature. So here we enable the pipeline_rw_ahead feature
  265. */
  266. /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
  267. /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
  268. return PASS;
  269. }
  270. static void get_samsung_nand_para(struct denali_nand_info *denali,
  271. uint8_t device_id)
  272. {
  273. if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
  274. /* Set timing register values according to datasheet */
  275. iowrite32(5, denali->flash_reg + ACC_CLKS);
  276. iowrite32(20, denali->flash_reg + RE_2_WE);
  277. iowrite32(12, denali->flash_reg + WE_2_RE);
  278. iowrite32(14, denali->flash_reg + ADDR_2_DATA);
  279. iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
  280. iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
  281. iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
  282. }
  283. }
static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
	/*
	 * Workaround to fix a controller bug which reports a wrong
	 * spare area size for some kind of Toshiba NAND device:
	 * 4KiB-page parts that claim a 64-byte spare area actually
	 * have 216 bytes, so patch the register back up.
	 */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
	    (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64))
		iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
}
  294. static void get_hynix_nand_para(struct denali_nand_info *denali,
  295. uint8_t device_id)
  296. {
  297. switch (device_id) {
  298. case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
  299. case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
  300. iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
  301. iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
  302. iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
  303. iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
  304. break;
  305. default:
  306. dev_warn(denali->dev,
  307. "Unknown Hynix NAND (Device ID: 0x%x).\n"
  308. "Will use default parameter values instead.\n",
  309. device_id);
  310. }
  311. }
  312. /*
  313. * determines how many NAND chips are connected to the controller. Note for
  314. * Intel CE4100 devices we don't support more than one device.
  315. */
  316. static void find_valid_banks(struct denali_nand_info *denali)
  317. {
  318. uint32_t id[denali->max_banks];
  319. int i;
  320. denali->total_used_banks = 1;
  321. for (i = 0; i < denali->max_banks; i++) {
  322. index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
  323. index_addr(denali, MODE_11 | (i << 24) | 1, 0);
  324. index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);
  325. dev_dbg(denali->dev,
  326. "Return 1st ID for bank[%d]: %x\n", i, id[i]);
  327. if (i == 0) {
  328. if (!(id[i] & 0x0ff))
  329. break; /* WTF? */
  330. } else {
  331. if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
  332. denali->total_used_banks++;
  333. else
  334. break;
  335. }
  336. }
  337. if (denali->platform == INTEL_CE4100) {
  338. /*
  339. * Platform limitations of the CE4100 device limit
  340. * users to a single chip solution for NAND.
  341. * Multichip support is not enabled.
  342. */
  343. if (denali->total_used_banks != 1) {
  344. dev_err(denali->dev,
  345. "Sorry, Intel CE4100 only supports a single NAND device.\n");
  346. BUG();
  347. }
  348. }
  349. dev_dbg(denali->dev,
  350. "denali->total_used_banks: %d\n", denali->total_used_banks);
  351. }
  352. /*
  353. * Use the configuration feature register to determine the maximum number of
  354. * banks that the hardware supports.
  355. */
  356. static void detect_max_banks(struct denali_nand_info *denali)
  357. {
  358. uint32_t features = ioread32(denali->flash_reg + FEATURES);
  359. denali->max_banks = 1 << (features & FEATURES__N_BANKS);
  360. /* the encoding changed from rev 5.0 to 5.1 */
  361. if (denali->revision < 0x0501)
  362. denali->max_banks <<= 1;
  363. }
/*
 * Identify the attached device with a raw READ ID (0x90) command and
 * program timing parameters accordingly (ONFI or vendor-specific).
 * Returns PASS, or FAIL when an ONFI device's parameters can't be read.
 */
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
	uint16_t status = PASS;
	uint32_t id_bytes[8], addr;
	uint8_t maf_id, device_id;
	int i;

	/*
	 * Use read id method to get device ID and other params.
	 * For some NAND chips, controller can't report the correct
	 * device ID by reading from DEVICE_ID register
	 */
	addr = MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, addr | 0, 0x90);
	index_addr(denali, addr | 1, 0);
	for (i = 0; i < 8; i++)
		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
	/* byte 0 is the manufacturer ID, byte 1 the device ID */
	maf_id = id_bytes[0];
	device_id = id_bytes[1];

	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
	    ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para(denali))
			return FAIL;
	} else if (maf_id == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali, device_id);
	} else if (maf_id == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (maf_id == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali, device_id);
	}

	/* log the timing registers as finally programmed */
	dev_info(denali->dev,
		 "Dump timing register values:\n"
		 "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
		 "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
		 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
		 ioread32(denali->flash_reg + ACC_CLKS),
		 ioread32(denali->flash_reg + RE_2_WE),
		 ioread32(denali->flash_reg + RE_2_RE),
		 ioread32(denali->flash_reg + WE_2_RE),
		 ioread32(denali->flash_reg + ADDR_2_DATA),
		 ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
		 ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
		 ioread32(denali->flash_reg + CS_SETUP_CNT));

	find_valid_banks(denali);

	/*
	 * If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
		nand_onfi_timing_set(denali, onfi_timing_mode);

	return status;
}
  415. static void denali_set_intr_modes(struct denali_nand_info *denali,
  416. uint16_t INT_ENABLE)
  417. {
  418. if (INT_ENABLE)
  419. iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
  420. else
  421. iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
  422. }
  423. /*
  424. * validation function to verify that the controlling software is making
  425. * a valid request
  426. */
  427. static inline bool is_flash_bank_valid(int flash_bank)
  428. {
  429. return flash_bank >= 0 && flash_bank < 4;
  430. }
  431. static void denali_irq_init(struct denali_nand_info *denali)
  432. {
  433. uint32_t int_mask;
  434. int i;
  435. /* Disable global interrupts */
  436. denali_set_intr_modes(denali, false);
  437. int_mask = DENALI_IRQ_ALL;
  438. /* Clear all status bits */
  439. for (i = 0; i < denali->max_banks; ++i)
  440. iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
  441. denali_irq_enable(denali, int_mask);
  442. }
  443. static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
  444. {
  445. denali_set_intr_modes(denali, false);
  446. }
  447. static void denali_irq_enable(struct denali_nand_info *denali,
  448. uint32_t int_mask)
  449. {
  450. int i;
  451. for (i = 0; i < denali->max_banks; ++i)
  452. iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
  453. }
  454. /*
  455. * This function only returns when an interrupt that this driver cares about
  456. * occurs. This is to reduce the overhead of servicing interrupts
  457. */
  458. static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
  459. {
  460. return read_interrupt_status(denali) & DENALI_IRQ_ALL;
  461. }
  462. /* Interrupts are cleared by writing a 1 to the appropriate status bit */
  463. static inline void clear_interrupt(struct denali_nand_info *denali,
  464. uint32_t irq_mask)
  465. {
  466. uint32_t intr_status_reg;
  467. intr_status_reg = INTR_STATUS(denali->flash_bank);
  468. iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
  469. }
  470. static void clear_interrupts(struct denali_nand_info *denali)
  471. {
  472. uint32_t status;
  473. spin_lock_irq(&denali->irq_lock);
  474. status = read_interrupt_status(denali);
  475. clear_interrupt(denali, status);
  476. denali->irq_status = 0x0;
  477. spin_unlock_irq(&denali->irq_lock);
  478. }
  479. static uint32_t read_interrupt_status(struct denali_nand_info *denali)
  480. {
  481. uint32_t intr_status_reg;
  482. intr_status_reg = INTR_STATUS(denali->flash_bank);
  483. return ioread32(denali->flash_reg + intr_status_reg);
  484. }
  485. /*
  486. * This is the interrupt service routine. It handles all interrupts
  487. * sent to this device. Note that on CE4100, this is a shared interrupt.
  488. */
/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device. Note that on CE4100 the line is shared, so we
 * must confirm the interrupt is ours before claiming it.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	uint32_t irq_status;
	irqreturn_t result = IRQ_NONE;

	/* irq_lock serialises irq_status against wait_for_irq() */
	spin_lock(&denali->irq_lock);

	/* check to see if a valid NAND chip has been selected. */
	if (is_flash_bank_valid(denali->flash_bank)) {
		/*
		 * check to see if controller generated the interrupt,
		 * since this is a shared interrupt
		 */
		irq_status = denali_irq_detected(denali);
		if (irq_status != 0) {
			/* handle interrupt */
			/* first acknowledge it */
			clear_interrupt(denali, irq_status);
			/*
			 * store the status in the device context for someone
			 * to read
			 */
			denali->irq_status |= irq_status;
			/* notify anyone who cares that it happened */
			complete(&denali->complete);
			/* tell the OS that we've handled this */
			result = IRQ_HANDLED;
		}
	}
	spin_unlock(&denali->irq_lock);
	return result;
}
/*
 * Sleep until one of the bits in irq_mask has been latched by the ISR,
 * or until a 1-second timeout expires. On success the matching bits are
 * consumed from denali->irq_status and the latched status is returned;
 * on timeout 0 is returned.
 */
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
{
	unsigned long comp_res;
	uint32_t intr_status;
	unsigned long timeout = msecs_to_jiffies(1000);

	do {
		comp_res =
			wait_for_completion_timeout(&denali->complete, timeout);
		/* irq_status is shared with the ISR; sample it under the lock */
		spin_lock_irq(&denali->irq_lock);
		intr_status = denali->irq_status;

		if (intr_status & irq_mask) {
			/* consume only the bits we were asked to wait for */
			denali->irq_status &= ~irq_mask;
			spin_unlock_irq(&denali->irq_lock);
			/* our interrupt was detected */
			break;
		}

		/*
		 * these are not the interrupts you are looking for -
		 * need to wait again
		 */
		spin_unlock_irq(&denali->irq_lock);
	} while (comp_res != 0);

	if (comp_res == 0) {
		/* timeout */
		pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
		       intr_status, irq_mask);
		intr_status = 0;
	}
	return intr_status;
}
  550. /*
  551. * This helper function setups the registers for ECC and whether or not
  552. * the spare area will be transferred.
  553. */
  554. static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
  555. bool transfer_spare)
  556. {
  557. int ecc_en_flag, transfer_spare_flag;
  558. /* set ECC, transfer spare bits if needed */
  559. ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
  560. transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
  561. /* Enable spare area/ECC per user's request. */
  562. iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
  563. iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
  564. }
  565. /*
  566. * sends a pipeline command operation to the controller. See the Denali NAND
  567. * controller's user guide for more information (section 4.2.3.6).
  568. */
/*
 * Send a pipeline command operation to the controller (see the Denali
 * NAND controller user guide, section 4.2.3.6). Configures ECC/spare
 * handling, clears pending interrupts, then issues the MODE_10 access
 * type selection (where needed) followed by the MODE_01 pipeline
 * command. Always returns PASS.
 */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
				    bool ecc_en, bool transfer_spare,
				    int access_type, int op)
{
	int status = PASS;
	uint32_t addr, cmd;

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

	/* start from a clean interrupt state before issuing the command */
	clear_interrupts(denali);

	/* bank select in the high bits, page number in the low bits */
	addr = BANK(denali->flash_bank) | denali->page;

	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
		/* main-area write: the MODE_01 command alone is enough */
		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
		/* write spare area: select the access type first... */
		cmd = MODE_10 | addr;
		index_addr(denali, cmd, access_type);

		/* ...then issue the pipeline command */
		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	} else if (op == DENALI_READ) {
		/* setup page read request for access type */
		cmd = MODE_10 | addr;
		index_addr(denali, cmd, access_type);

		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	}
	return status;
}
  596. /* helper function that simply writes a buffer to the flash */
  597. static int write_data_to_flash_mem(struct denali_nand_info *denali,
  598. const uint8_t *buf, int len)
  599. {
  600. uint32_t *buf32;
  601. int i;
  602. /*
  603. * verify that the len is a multiple of 4.
  604. * see comment in read_data_from_flash_mem()
  605. */
  606. BUG_ON((len % 4) != 0);
  607. /* write the data to the flash memory */
  608. buf32 = (uint32_t *)buf;
  609. for (i = 0; i < len / 4; i++)
  610. iowrite32(*buf32++, denali->flash_mem + 0x10);
  611. return i * 4; /* intent is to return the number of bytes read */
  612. }
  613. /* helper function that simply reads a buffer from the flash */
  614. static int read_data_from_flash_mem(struct denali_nand_info *denali,
  615. uint8_t *buf, int len)
  616. {
  617. uint32_t *buf32;
  618. int i;
  619. /*
  620. * we assume that len will be a multiple of 4, if not it would be nice
  621. * to know about it ASAP rather than have random failures...
  622. * This assumption is based on the fact that this function is designed
  623. * to be used to read flash pages, which are typically multiples of 4.
  624. */
  625. BUG_ON((len % 4) != 0);
  626. /* transfer the data from the flash */
  627. buf32 = (uint32_t *)buf;
  628. for (i = 0; i < len / 4; i++)
  629. *buf32++ = ioread32(denali->flash_mem + 0x10);
  630. return i * 4; /* intent is to return the number of bytes read */
  631. }
  632. /* writes OOB data to the device */
  633. static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
  634. {
  635. struct denali_nand_info *denali = mtd_to_denali(mtd);
  636. uint32_t irq_status;
  637. uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL;
  638. int status = 0;
  639. denali->page = page;
  640. if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
  641. DENALI_WRITE) == PASS) {
  642. write_data_to_flash_mem(denali, buf, mtd->oobsize);
  643. /* wait for operation to complete */
  644. irq_status = wait_for_irq(denali, irq_mask);
  645. if (irq_status == 0) {
  646. dev_err(denali->dev, "OOB write failed\n");
  647. status = -EIO;
  648. }
  649. } else {
  650. dev_err(denali->dev, "unable to send pipeline command\n");
  651. status = -EIO;
  652. }
  653. return status;
  654. }
  655. /* reads OOB data from the device */
/*
 * Read one page worth of OOB data from the device into buf. Errors are
 * logged but not returned to the caller.
 */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR__LOAD_COMP;
	uint32_t irq_status, addr, cmd;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
				     DENALI_READ) == PASS) {
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		/*
		 * wait for command to be accepted
		 * can always use status0 bit as the
		 * mask is identical for each bank.
		 */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
			dev_err(denali->dev, "page on OOB timeout %d\n",
				denali->page);

		/*
		 * We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, cmd, MAIN_ACCESS);
	}
}
  686. static int denali_check_erased_page(struct mtd_info *mtd,
  687. struct nand_chip *chip, uint8_t *buf,
  688. unsigned long uncor_ecc_flags,
  689. unsigned int max_bitflips)
  690. {
  691. uint8_t *ecc_code = chip->buffers->ecccode;
  692. int ecc_steps = chip->ecc.steps;
  693. int ecc_size = chip->ecc.size;
  694. int ecc_bytes = chip->ecc.bytes;
  695. int i, ret, stat;
  696. ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
  697. chip->ecc.total);
  698. if (ret)
  699. return ret;
  700. for (i = 0; i < ecc_steps; i++) {
  701. if (!(uncor_ecc_flags & BIT(i)))
  702. continue;
  703. stat = nand_check_erased_ecc_chunk(buf, ecc_size,
  704. ecc_code, ecc_bytes,
  705. NULL, 0,
  706. chip->ecc.strength);
  707. if (stat < 0) {
  708. mtd->ecc_stats.failed++;
  709. } else {
  710. mtd->ecc_stats.corrected += stat;
  711. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  712. }
  713. buf += ecc_size;
  714. ecc_code += ecc_bytes;
  715. }
  716. return max_bitflips;
  717. }
/*
 * HW-ECC-fixup flavor of error reporting: the controller has already
 * corrected the data; we only read the per-bank ECC_COR_INFO register.
 *
 * Returns the max per-sector bitflip count, or 0 with *uncor_ecc_flags
 * set to "every sector" when the uncorrectable bit is set (the register
 * cannot tell us which sector failed).
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->flash_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector. We can not know "how many sectors", or
		 * "which sector(s)". We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
  747. #define ECC_SECTOR_SIZE 512
  748. #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
  749. #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
  750. #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
  751. #define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
  752. #define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
  753. #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
/*
 * SW-ECC-fixup flavor: drain the controller's error FIFO
 * (ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO register pair) and apply each
 * single-byte correction to @buf ourselves.  Sectors reported as
 * uncorrectable are recorded in *uncor_ecc_flags for a later erased-page
 * check.  Returns the max per-sector bitflip count.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;

	/* mask interrupts while we poll the error registers directly */
	denali_set_intr_modes(denali, false);

	do {
		err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
		err_sector = ECC_SECTOR(err_addr);
		err_byte = ECC_BYTE(err_addr);

		err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
		err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
		err_device = ECC_ERR_DEVICE(err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ECC_SECTOR_SIZE) {
			/*
			 * If err_byte is >= ECC_SECTOR_SIZE the error is in
			 * the OOB area, which we do not correct here.
			 * err_device identifies which NAND chip the error
			 * bits belong to when more than one device is
			 * connected in parallel.
			 */
			int offset;
			unsigned int flips_in_byte;

			/* interleaved layout: sector offset scaled by devnum */
			offset = (err_sector * ECC_SECTOR_SIZE + err_byte) *
					denali->devnum + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!ECC_LAST_ERR(err_cor_info));

	/*
	 * Once handle all ecc errors, controller will trigger a
	 * ECC_TRANSACTION_DONE interrupt, so here just wait for
	 * a while for this interrupt
	 */
	while (!(read_interrupt_status(denali) & INTR__ECC_TRANSACTION_DONE))
		cpu_relax();
	clear_interrupts(denali);
	denali_set_intr_modes(denali, true);

	return max_bitflips;
}
  814. /* programs the controller to either enable/disable DMA transfers */
  815. static void denali_enable_dma(struct denali_nand_info *denali, bool en)
  816. {
  817. iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
  818. ioread32(denali->flash_reg + DMA_ENABLE);
  819. }
/*
 * Program a single-page DMA transfer on controllers with 64-bit DMA
 * addressing.  The three index_addr() writes below form a fixed hardware
 * command sequence; their order must not change.
 */
static void denali_setup_dma64(struct denali_nand_info *denali, int op)
{
	uint32_t mode;
	const int page_count = 1;
	uint64_t addr = denali->buf.dma_buf;

	mode = MODE_10 | BANK(denali->flash_bank) | denali->page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);

	/* 2. set memory low address */
	index_addr(denali, mode, addr);

	/* 3. set memory high address (upper 32 bits of the DMA address) */
	index_addr(denali, mode, addr >> 32);
}
/*
 * Program a single-page DMA transfer on controllers with 32-bit DMA
 * addressing.  The four index_addr() writes form a fixed hardware command
 * sequence; their order must not change.
 */
static void denali_setup_dma32(struct denali_nand_info *denali, int op)
{
	uint32_t mode;
	const int page_count = 1;
	uint32_t addr = denali->buf.dma_buf;

	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	index_addr(denali, mode | 0x14000, 0x2400);
}
  853. static void denali_setup_dma(struct denali_nand_info *denali, int op)
  854. {
  855. if (denali->caps & DENALI_CAP_DMA_64BIT)
  856. denali_setup_dma64(denali, op);
  857. else
  858. denali_setup_dma32(denali, op);
  859. }
/*
 * writes a page. user specifies type, and this function handles the
 * configuration details.
 *
 * Always returns 0; a timeout is reported to the NAND core indirectly by
 * setting denali->status = NAND_STATUS_FAIL, which denali_waitfunc()
 * later hands back to the caller.
 */
static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
		      const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_status;
	uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;

	/*
	 * if it is a raw xfer, we want to disable ecc and send the spare area.
	 * !raw_xfer - enable ecc
	 * raw_xfer - transfer spare
	 */
	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer) {
		/* transfer the data to the spare area */
		memcpy(denali->buf.buf + mtd->writesize,
		       chip->oob_poi,
		       mtd->oobsize);
	}

	/* hand the buffer to the device, kick off DMA, then wait */
	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0) {
		dev_err(denali->dev, "timeout on write_page (type = %d)\n",
			raw_xfer);
		denali->status = NAND_STATUS_FAIL;
	}

	denali_enable_dma(denali, false);
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);

	return 0;
}
  901. /* NAND core entry points */
  902. /*
  903. * this is the callback that the NAND core calls to write a page. Since
  904. * writing a page with ECC or without is similar, all the work is done
  905. * by write_page above.
  906. */
  907. static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  908. const uint8_t *buf, int oob_required, int page)
  909. {
  910. /*
  911. * for regular page writes, we let HW handle all the ECC
  912. * data written to the device.
  913. */
  914. return write_page(mtd, chip, buf, false);
  915. }
  916. /*
  917. * This is the callback that the NAND core calls to write a page without ECC.
  918. * raw access is similar to ECC page writes, so all the work is done in the
  919. * write_page() function above.
  920. */
  921. static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  922. const uint8_t *buf, int oob_required,
  923. int page)
  924. {
  925. /*
  926. * for raw page writes, we want to disable ECC and simply write
  927. * whatever data is in the buffer.
  928. */
  929. return write_page(mtd, chip, buf, true);
  930. }
  931. static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
  932. int page)
  933. {
  934. return write_oob_data(mtd, chip->oob_poi, page);
  935. }
  936. static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
  937. int page)
  938. {
  939. read_oob_data(mtd, chip->oob_poi, page);
  940. return 0;
  941. }
/*
 * ->read_page() callback: DMA the page (plus OOB) into the bounce buffer,
 * then run either the HW or SW ECC fixup and, if uncorrectable sectors
 * were flagged, the erased-page check.
 *
 * Returns max bitflips (<= ecc strength), or a negative errno from the
 * fixup/erased-page helpers.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_status;
	/* completion condition differs between the HW- and SW-fixup flavors */
	uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ?
				INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR :
				INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR;
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;

	/* cmdfunc(NAND_CMD_READ0) must have latched this page already */
	if (page != denali->page) {
		dev_err(denali->dev,
			"IN %s: page %d is not equal to denali->page %d",
			__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	memcpy(buf, denali->buf.buf, mtd->writesize);

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (irq_status & INTR__ECC_ERR)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
	denali_enable_dma(denali, false);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* erased-page check needs the raw ECC bytes from the OOB */
		read_oob_data(mtd, chip->oob_poi, denali->page);

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
/*
 * ->read_page_raw() callback: DMA the page with ECC disabled and hand
 * back both main data (into @buf) and spare data (into chip->oob_poi)
 * unmodified.  The wait_for_irq() result is deliberately ignored; raw
 * reads always report success.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_mask = INTR__DMA_CMD_COMP;

	/* cmdfunc(NAND_CMD_READ0) must have latched this page already */
	if (page != denali->page) {
		dev_err(denali->dev,
			"IN %s: page %d is not equal to denali->page %d",
			__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, false, true);
	denali_enable_dma(denali, true);

	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	denali_enable_dma(denali, false);

	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
  1009. static uint8_t denali_read_byte(struct mtd_info *mtd)
  1010. {
  1011. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1012. uint8_t result = 0xff;
  1013. if (denali->buf.head < denali->buf.tail)
  1014. result = denali->buf.buf[denali->buf.head++];
  1015. return result;
  1016. }
/*
 * ->select_chip() callback: record the active bank.  Taken under
 * irq_lock since flash_bank is shared with the interrupt path.
 */
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	spin_lock_irq(&denali->irq_lock);
	denali->flash_bank = chip;
	spin_unlock_irq(&denali->irq_lock);
}
  1024. static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
  1025. {
  1026. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1027. int status = denali->status;
  1028. denali->status = 0;
  1029. return status;
  1030. }
/*
 * ->erase() callback: issue a block erase for @page's block and wait for
 * completion.  Returns NAND_STATUS_FAIL on failure, PASS otherwise
 * (a timeout with neither interrupt set also yields PASS).
 */
static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t cmd, irq_status;

	clear_interrupts(denali);

	/* MODE_10 command with sub-opcode 0x1 = block erase */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR__ERASE_COMP | INTR__ERASE_FAIL);

	return irq_status & INTR__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
}
/*
 * ->cmdfunc() callback: translate the NAND core's opcodes into controller
 * register sequences.  READID/PARAM results are buffered so the core can
 * fetch them one byte at a time via denali_read_byte().
 */
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr, id;
	int i;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		/* nothing to do: the write itself happens in write_page() */
		break;
	case NAND_CMD_STATUS:
		read_status(denali);
		break;
	case NAND_CMD_READID:
	case NAND_CMD_PARAM:
		reset_buf(denali);
		/*
		 * sometimes ManufactureId read from register is not right
		 * e.g. some of Micron MT29F32G08QAA MLC NAND chips
		 * So here we send READID cmd to NAND insteand
		 */
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, 0x90);	/* READID opcode */
		index_addr(denali, addr | 1, col);	/* address cycle */
		for (i = 0; i < 8; i++) {
			/* read 8 ID bytes into the software FIFO */
			index_addr_read_data(denali, addr | 2, &id);
			write_byte_to_buf(denali, id);
		}
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		/* just latch the page; the data phase comes later */
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	default:
		pr_err(": unsupported command received 0x%x\n", cmd);
		break;
	}
}
  1086. /* end NAND core entry points */
/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision =
				swab16(ioread32(denali->flash_reg + REVISION));

	/*
	 * tell driver how many bit controller will skip before
	 * writing ECC code in OOB, this register may be already
	 * set by firmware. So we read this value out.
	 * if this value is 0, just let it be.
	 */
	denali->bbtskipbytes = ioread32(denali->flash_reg +
						SPARE_AREA_SKIP_BYTES);
	detect_max_banks(denali);
	denali_nand_reset(denali);
	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG,
			denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(1, denali->flash_reg + ECC_ENABLE);
	denali_nand_timing_set(denali);
	denali_irq_init(denali);
}
/*
 * Although the controller spec says SLC ECC is forced to be 4-bit,
 * the Denali controller in MRST only supports 15-bit and 8-bit ECC
 * correction.
 */
  1122. #define ECC_8BITS 14
  1123. #define ECC_15BITS 26
  1124. static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
  1125. struct mtd_oob_region *oobregion)
  1126. {
  1127. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1128. struct nand_chip *chip = mtd_to_nand(mtd);
  1129. if (section)
  1130. return -ERANGE;
  1131. oobregion->offset = denali->bbtskipbytes;
  1132. oobregion->length = chip->ecc.total;
  1133. return 0;
  1134. }
  1135. static int denali_ooblayout_free(struct mtd_info *mtd, int section,
  1136. struct mtd_oob_region *oobregion)
  1137. {
  1138. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1139. struct nand_chip *chip = mtd_to_nand(mtd);
  1140. if (section)
  1141. return -ERANGE;
  1142. oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
  1143. oobregion->length = mtd->oobsize - oobregion->offset;
  1144. return 0;
  1145. }
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};

/* on-flash bad block table signatures ("Bbt0" and its mirror "1tbB") */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/* primary BBT: stored in the last blocks of each chip, versioned */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

/* mirror BBT: same placement rules, distinct signature */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
  1170. /* initialize driver data structures */
  1171. static void denali_drv_init(struct denali_nand_info *denali)
  1172. {
  1173. /*
  1174. * the completion object will be used to notify
  1175. * the callee that the interrupt is done
  1176. */
  1177. init_completion(&denali->complete);
  1178. /*
  1179. * the spinlock will be used to synchronize the ISR with any
  1180. * element that might be access shared data (interrupt status)
  1181. */
  1182. spin_lock_init(&denali->irq_lock);
  1183. /* indicate that MTD has not selected a valid bank yet */
  1184. denali->flash_bank = CHIP_SELECT_INVALID;
  1185. /* initialize our irq_status variable to indicate no interrupts */
  1186. denali->irq_status = 0;
  1187. }
/*
 * Adjust the MTD/NAND geometry when two x8 chips are wired in parallel
 * behind an x16-capable IP (DEVICES_CONNECTED == 2): all logical sizes
 * double.  Returns 0 on success, -EINVAL for unsupported device counts.
 */
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
	 */
	if (denali->devnum == 0) {
		denali->devnum = 1;
		iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
	}

	if (denali->devnum == 1)
		return 0;

	if (denali->devnum != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devnum);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->bbtskipbytes <<= 1;

	return 0;
}
  1232. int denali_init(struct denali_nand_info *denali)
  1233. {
  1234. struct nand_chip *chip = &denali->nand;
  1235. struct mtd_info *mtd = nand_to_mtd(chip);
  1236. int ret;
  1237. if (denali->platform == INTEL_CE4100) {
  1238. /*
  1239. * Due to a silicon limitation, we can only support
  1240. * ONFI timing mode 1 and below.
  1241. */
  1242. if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
  1243. pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
  1244. return -EINVAL;
  1245. }
  1246. }
  1247. /* allocate a temporary buffer for nand_scan_ident() */
  1248. denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
  1249. GFP_DMA | GFP_KERNEL);
  1250. if (!denali->buf.buf)
  1251. return -ENOMEM;
  1252. mtd->dev.parent = denali->dev;
  1253. denali_hw_init(denali);
  1254. denali_drv_init(denali);
  1255. /* Request IRQ after all the hardware initialization is finished */
  1256. ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
  1257. IRQF_SHARED, DENALI_NAND_NAME, denali);
  1258. if (ret) {
  1259. dev_err(denali->dev, "Unable to request IRQ\n");
  1260. return ret;
  1261. }
  1262. /* now that our ISR is registered, we can enable interrupts */
  1263. denali_set_intr_modes(denali, true);
  1264. nand_set_flash_node(chip, denali->dev->of_node);
  1265. /* Fallback to the default name if DT did not give "label" property */
  1266. if (!mtd->name)
  1267. mtd->name = "denali-nand";
  1268. /* register the driver with the NAND core subsystem */
  1269. chip->select_chip = denali_select_chip;
  1270. chip->cmdfunc = denali_cmdfunc;
  1271. chip->read_byte = denali_read_byte;
  1272. chip->waitfunc = denali_waitfunc;
  1273. /*
  1274. * scan for NAND devices attached to the controller
  1275. * this is the first stage in a two step process to register
  1276. * with the nand subsystem
  1277. */
  1278. ret = nand_scan_ident(mtd, denali->max_banks, NULL);
  1279. if (ret)
  1280. goto failed_req_irq;
  1281. /* allocate the right size buffer now */
  1282. devm_kfree(denali->dev, denali->buf.buf);
  1283. denali->buf.buf = devm_kzalloc(denali->dev,
  1284. mtd->writesize + mtd->oobsize,
  1285. GFP_KERNEL);
  1286. if (!denali->buf.buf) {
  1287. ret = -ENOMEM;
  1288. goto failed_req_irq;
  1289. }
  1290. ret = dma_set_mask(denali->dev,
  1291. DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ?
  1292. 64 : 32));
  1293. if (ret) {
  1294. dev_err(denali->dev, "No usable DMA configuration\n");
  1295. goto failed_req_irq;
  1296. }
  1297. denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
  1298. mtd->writesize + mtd->oobsize,
  1299. DMA_BIDIRECTIONAL);
  1300. if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
  1301. dev_err(denali->dev, "Failed to map DMA buffer\n");
  1302. ret = -EIO;
  1303. goto failed_req_irq;
  1304. }
  1305. /*
  1306. * second stage of the NAND scan
  1307. * this stage requires information regarding ECC and
  1308. * bad block management.
  1309. */
  1310. /* Bad block management */
  1311. chip->bbt_td = &bbt_main_descr;
  1312. chip->bbt_md = &bbt_mirror_descr;
  1313. /* skip the scan for now until we have OOB read and write support */
  1314. chip->bbt_options |= NAND_BBT_USE_FLASH;
  1315. chip->options |= NAND_SKIP_BBTSCAN;
  1316. chip->ecc.mode = NAND_ECC_HW_SYNDROME;
  1317. /* no subpage writes on denali */
  1318. chip->options |= NAND_NO_SUBPAGE_WRITE;
  1319. /*
  1320. * Denali Controller only support 15bit and 8bit ECC in MRST,
  1321. * so just let controller do 15bit ECC for MLC and 8bit ECC for
  1322. * SLC if possible.
  1323. * */
  1324. if (!nand_is_slc(chip) &&
  1325. (mtd->oobsize > (denali->bbtskipbytes +
  1326. ECC_15BITS * (mtd->writesize /
  1327. ECC_SECTOR_SIZE)))) {
  1328. /* if MLC OOB size is large enough, use 15bit ECC*/
  1329. chip->ecc.strength = 15;
  1330. chip->ecc.bytes = ECC_15BITS;
  1331. iowrite32(15, denali->flash_reg + ECC_CORRECTION);
  1332. } else if (mtd->oobsize < (denali->bbtskipbytes +
  1333. ECC_8BITS * (mtd->writesize /
  1334. ECC_SECTOR_SIZE))) {
  1335. pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes");
  1336. goto failed_req_irq;
  1337. } else {
  1338. chip->ecc.strength = 8;
  1339. chip->ecc.bytes = ECC_8BITS;
  1340. iowrite32(8, denali->flash_reg + ECC_CORRECTION);
  1341. }
  1342. mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
  1343. /* override the default read operations */
  1344. chip->ecc.size = ECC_SECTOR_SIZE;
  1345. chip->ecc.read_page = denali_read_page;
  1346. chip->ecc.read_page_raw = denali_read_page_raw;
  1347. chip->ecc.write_page = denali_write_page;
  1348. chip->ecc.write_page_raw = denali_write_page_raw;
  1349. chip->ecc.read_oob = denali_read_oob;
  1350. chip->ecc.write_oob = denali_write_oob;
  1351. chip->erase = denali_erase;
  1352. ret = denali_multidev_fixup(denali);
  1353. if (ret)
  1354. goto failed_req_irq;
  1355. ret = nand_scan_tail(mtd);
  1356. if (ret)
  1357. goto failed_req_irq;
  1358. ret = mtd_device_register(mtd, NULL, 0);
  1359. if (ret) {
  1360. dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
  1361. goto failed_req_irq;
  1362. }
  1363. return 0;
  1364. failed_req_irq:
  1365. denali_irq_cleanup(denali->irq, denali);
  1366. return ret;
  1367. }
  1368. EXPORT_SYMBOL(denali_init);
  1369. /* driver exit point */
  1370. void denali_remove(struct denali_nand_info *denali)
  1371. {
  1372. struct mtd_info *mtd = nand_to_mtd(&denali->nand);
  1373. /*
  1374. * Pre-compute DMA buffer size to avoid any problems in case
  1375. * nand_release() ever changes in a way that mtd->writesize and
  1376. * mtd->oobsize are not reliable after this call.
  1377. */
  1378. int bufsize = mtd->writesize + mtd->oobsize;
  1379. nand_release(mtd);
  1380. denali_irq_cleanup(denali->irq, denali);
  1381. dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
  1382. DMA_BIDIRECTIONAL);
  1383. }
  1384. EXPORT_SYMBOL(denali_remove);