denali.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604
  1. /*
  2. * NAND Flash Controller Device Driver
  3. * Copyright © 2009-2010, Intel Corporation and its suppliers.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc.,
  16. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17. *
  18. */
  19. #include <linux/interrupt.h>
  20. #include <linux/delay.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/wait.h>
  23. #include <linux/mutex.h>
  24. #include <linux/mtd/mtd.h>
  25. #include <linux/module.h>
  26. #include "denali.h"
  27. MODULE_LICENSE("GPL");
  28. /*
  29. * We define a module parameter that allows the user to override
  30. * the hardware and decide what timing mode should be used.
  31. */
  32. #define NAND_DEFAULT_TIMINGS -1
  33. static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
  34. module_param(onfi_timing_mode, int, S_IRUGO);
  35. MODULE_PARM_DESC(onfi_timing_mode,
  36. "Overrides default ONFI setting. -1 indicates use default timings");
  37. #define DENALI_NAND_NAME "denali-nand"
  38. /*
  39. * We define a macro here that combines all interrupts this driver uses into
  40. * a single constant value, for convenience.
  41. */
  42. #define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
  43. INTR_STATUS__ECC_TRANSACTION_DONE | \
  44. INTR_STATUS__ECC_ERR | \
  45. INTR_STATUS__PROGRAM_FAIL | \
  46. INTR_STATUS__LOAD_COMP | \
  47. INTR_STATUS__PROGRAM_COMP | \
  48. INTR_STATUS__TIME_OUT | \
  49. INTR_STATUS__ERASE_FAIL | \
  50. INTR_STATUS__RST_COMP | \
  51. INTR_STATUS__ERASE_COMP)
  52. /*
  53. * indicates whether or not the internal value for the flash bank is
  54. * valid or not
  55. */
  56. #define CHIP_SELECT_INVALID -1
  57. #define SUPPORT_8BITECC 1
  58. /*
  59. * This macro divides two integers and rounds fractional values up
  60. * to the nearest integer value.
  61. */
  62. #define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
  63. /*
  64. * this macro allows us to convert from an MTD structure to our own
  65. * device context (denali) structure.
  66. */
  67. static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
  68. {
  69. return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
  70. }
  71. /*
  72. * These constants are defined by the driver to enable common driver
  73. * configuration options.
  74. */
  75. #define SPARE_ACCESS 0x41
  76. #define MAIN_ACCESS 0x42
  77. #define MAIN_SPARE_ACCESS 0x43
  78. #define PIPELINE_ACCESS 0x2000
  79. #define DENALI_READ 0
  80. #define DENALI_WRITE 0x100
  81. /* types of device accesses. We can issue commands and get status */
  82. #define COMMAND_CYCLE 0
  83. #define ADDR_CYCLE 1
  84. #define STATUS_CYCLE 2
  85. /*
  86. * this is a helper macro that allows us to
  87. * format the bank into the proper bits for the controller
  88. */
  89. #define BANK(x) ((x) << 24)
  90. /* forward declarations */
  91. static void clear_interrupts(struct denali_nand_info *denali);
  92. static uint32_t wait_for_irq(struct denali_nand_info *denali,
  93. uint32_t irq_mask);
  94. static void denali_irq_enable(struct denali_nand_info *denali,
  95. uint32_t int_mask);
  96. static uint32_t read_interrupt_status(struct denali_nand_info *denali);
/*
 * Certain operations for the denali NAND controller use an indexed mode to
 * read/write data. The operation is performed by writing the address value
 * of the command to the device memory followed by the data. This function
 * abstracts this common operation.
 *
 * The command/address word goes to the base of the indexed window and the
 * payload goes to the data port at offset 0x10; the order of the two
 * writes matters to the controller.
 */
static void index_addr(struct denali_nand_info *denali,
		       uint32_t address, uint32_t data)
{
	iowrite32(address, denali->flash_mem);
	iowrite32(data, denali->flash_mem + 0x10);
}
/*
 * Perform an indexed read of the device: write the command/address word,
 * then read the result back from the data port at offset 0x10 into *pdata.
 */
static void index_addr_read_data(struct denali_nand_info *denali,
				 uint32_t address, uint32_t *pdata)
{
	iowrite32(address, denali->flash_mem);
	*pdata = ioread32(denali->flash_mem + 0x10);
}
  116. /*
  117. * We need to buffer some data for some of the NAND core routines.
  118. * The operations manage buffering that data.
  119. */
  120. static void reset_buf(struct denali_nand_info *denali)
  121. {
  122. denali->buf.head = denali->buf.tail = 0;
  123. }
/* Append one byte at the tail of the staging buffer and advance the tail. */
static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
	denali->buf.buf[denali->buf.tail++] = byte;
}
  128. /* reads the status of the device */
  129. static void read_status(struct denali_nand_info *denali)
  130. {
  131. uint32_t cmd;
  132. /* initialize the data buffer to store status */
  133. reset_buf(denali);
  134. cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
  135. if (cmd)
  136. write_byte_to_buf(denali, NAND_STATUS_WP);
  137. else
  138. write_byte_to_buf(denali, 0);
  139. }
/*
 * resets a specific device connected to the core.
 *
 * Clears pending interrupts first so the subsequent wait only sees the
 * completion/timeout raised by this reset, then kicks the per-bank bit
 * in DEVICE_RESET and waits for RST_COMP or TIME_OUT.
 */
static void reset_bank(struct denali_nand_info *denali)
{
	uint32_t irq_status;
	uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT;

	clear_interrupts(denali);
	iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);

	irq_status = wait_for_irq(denali, irq_mask);
	if (irq_status & INTR_STATUS__TIME_OUT)
		dev_err(denali->dev, "reset bank failed.\n");
}
/*
 * Reset the flash controller: reset every bank in turn, busy-polling the
 * per-bank INTR_STATUS register (interrupts are not yet wired up at this
 * point, hence cpu_relax() polling rather than wait_for_irq()).
 * Always returns PASS; a per-bank timeout is only logged at debug level.
 */
static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
	int i;

	/* ack any stale RST_COMP/TIME_OUT bits before starting */
	for (i = 0; i < denali->max_banks; i++)
		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
			  denali->flash_reg + INTR_STATUS(i));

	for (i = 0; i < denali->max_banks; i++) {
		iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
		/* spin until the controller reports completion or timeout */
		while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
			(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
			cpu_relax();
		if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
			INTR_STATUS__TIME_OUT)
			dev_dbg(denali->dev,
			"NAND Reset operation timed out on bank %d\n", i);
	}

	/* clear the completion/timeout bits we just consumed */
	for (i = 0; i < denali->max_banks; i++)
		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
			  denali->flash_reg + INTR_STATUS(i));

	return PASS;
}
/*
 * this routine calculates the ONFI timing values for a given mode and
 * programs the clocking register accordingly. The mode is determined by
 * the get_onfi_nand_para routine.
 *
 * The per-mode tables below are ONFI AC timing parameters (presumably in
 * nanoseconds, per the ONFI spec -- confirm against the controller data
 * sheet); each is indexed by timing mode 0-5.  The computed values are
 * converted to controller clock counts via CEIL_DIV(ns, CLK_X).
 */
static void nand_onfi_timing_set(struct denali_nand_info *denali,
				 uint16_t mode)
{
	uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
	uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
	uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
	uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
	uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
	uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
	uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
	uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
	uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
	uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
	uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
	uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};

	uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
	uint16_t dv_window = 0;
	uint16_t en_lo, en_hi;
	uint16_t acc_clks;
	uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

	/* read-enable low/high pulse widths, in clocks */
	en_lo = CEIL_DIV(Trp[mode], CLK_X);
	en_hi = CEIL_DIV(Treh[mode], CLK_X);
#if ONFI_BLOOM_TIME
	if ((en_hi * CLK_X) < (Treh[mode] + 2))
		en_hi++;
#endif

	/* stretch the cycle so its total satisfies Trc and CLK_MULTI */
	if ((en_lo + en_hi) * CLK_X < Trc[mode])
		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);

	if ((en_lo + en_hi) < CLK_MULTI)
		en_lo += CLK_MULTI - en_lo - en_hi;

	/* widen en_lo until the data-valid window is at least 8 ns */
	while (dv_window < 8) {
		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];

		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];

		data_invalid = data_invalid_rhoh < data_invalid_rloh ?
					data_invalid_rhoh : data_invalid_rloh;

		dv_window = data_invalid - Trea[mode];

		if (dv_window < 8)
			en_lo++;
	}

	/* access clocks must leave at least 3 ns of margin past Trea */
	acc_clks = CEIL_DIV(Trea[mode], CLK_X);

	while (acc_clks * CLK_X - Trea[mode] < 3)
		acc_clks++;

	if (data_invalid - acc_clks * CLK_X < 2)
		dev_warn(denali->dev, "%s, Line %d: Warning!\n",
			 __FILE__, __LINE__);

	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
	if (cs_cnt == 0)
		cs_cnt = 1;

	if (Tcea[mode]) {
		/* grow chip-select setup until Tcea is honored */
		while (cs_cnt * CLK_X + Trea[mode] < Tcea[mode])
			cs_cnt++;
	}

#if MODE5_WORKAROUND
	if (mode == 5)
		acc_clks = 5;
#endif

	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
	if (ioread32(denali->flash_reg + MANUFACTURER_ID) == 0 &&
		ioread32(denali->flash_reg + DEVICE_ID) == 0x88)
		acc_clks = 6;

	/* program the computed counts into the controller timing registers */
	iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
	iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
	iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
	iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
	iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
	iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
	iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
	iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
}
/*
 * queries the NAND device to see what ONFI modes it supports.
 * Returns FAIL if the controller reports no valid ONFI timing-mode data;
 * otherwise programs the highest supported mode (5 down to 1, falling
 * back to mode 0 if no higher bit is set) and returns PASS.
 */
static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
{
	int i;

	/*
	 * we needn't to do a reset here because driver has already
	 * reset all the banks before
	 */
	if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	/* pick the highest timing mode advertised in bits 5..1 */
	for (i = 5; i > 0; i--) {
		if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
			(0x01 << i))
			break;
	}

	nand_onfi_timing_set(denali, i);

	/*
	 * By now, all the ONFI devices we know support the page cache
	 * rw feature. So here we enable the pipeline_rw_ahead feature
	 */
	/* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */

	return PASS;
}
/*
 * Apply fixed timing-register values for a known Samsung part.
 * Only device ID 0xd3 (K9WAG08U1A) is special-cased; all other Samsung
 * IDs keep whatever the controller auto-detected.
 */
static void get_samsung_nand_para(struct denali_nand_info *denali,
				  uint8_t device_id)
{
	if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
		/* Set timing register values according to datasheet */
		iowrite32(5, denali->flash_reg + ACC_CLKS);
		iowrite32(20, denali->flash_reg + RE_2_WE);
		iowrite32(12, denali->flash_reg + WE_2_RE);
		iowrite32(14, denali->flash_reg + ADDR_2_DATA);
		iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
		iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
		iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
	}
}
/*
 * Correct the controller's auto-detected geometry for certain Toshiba
 * devices and pick the ECC strength.
 */
static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
	uint32_t tmp;

	/*
	 * Workaround to fix a controller bug which reports a wrong
	 * spare area size for some kind of Toshiba NAND device
	 */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		/* force the real spare size, then recompute the logical
		 * (per-page) spare size across all connected devices */
		iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
			ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(tmp,
			  denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
#endif
	}
}
/*
 * Override the controller's auto-detected geometry for known Hynix
 * parts (the controller mis-reads these); unknown IDs fall through with
 * only a warning, keeping the detected defaults.
 */
static void get_hynix_nand_para(struct denali_nand_info *denali,
				uint8_t device_id)
{
	uint32_t main_size, spare_size;

	switch (device_id) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		/* logical page sizes scale with the number of devices
		 * wired in parallel */
		main_size = 4096 *
			ioread32(denali->flash_reg + DEVICES_CONNECTED);
		spare_size = 224 *
			ioread32(denali->flash_reg + DEVICES_CONNECTED);
		iowrite32(main_size,
			  denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size,
			  denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
#endif
		break;
	default:
		dev_warn(denali->dev,
			 "Unknown Hynix NAND (Device ID: 0x%x).\n"
			 "Will use default parameter values instead.\n",
			 device_id);
	}
}
/*
 * determines how many NAND chips are connected to the controller. Note for
 * Intel CE4100 devices we don't support more than one device.
 *
 * Issues a READ ID (0x90) cycle to each bank and counts banks whose
 * first ID byte matches bank 0's.  Scanning stops at the first
 * non-matching bank, so only a contiguous run from bank 0 is counted.
 */
static void find_valid_banks(struct denali_nand_info *denali)
{
	/* NOTE(review): variable-length array on the kernel stack --
	 * max_banks is hardware-derived; confirm its upper bound is small */
	uint32_t id[denali->max_banks];
	int i;

	denali->total_used_banks = 1;
	for (i = 0; i < denali->max_banks; i++) {
		/* READ ID: command cycle, address cycle 0, then read data */
		index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
		index_addr(denali, MODE_11 | (i << 24) | 1, 0);
		index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);

		dev_dbg(denali->dev,
			"Return 1st ID for bank[%d]: %x\n", i, id[i]);

		if (i == 0) {
			if (!(id[i] & 0x0ff))
				break; /* WTF? */
		} else {
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				denali->total_used_banks++;
			else
				break;
		}
	}

	if (denali->platform == INTEL_CE4100) {
		/*
		 * Platform limitations of the CE4100 device limit
		 * users to a single chip solution for NAND.
		 * Multichip support is not enabled.
		 */
		if (denali->total_used_banks != 1) {
			dev_err(denali->dev,
				"Sorry, Intel CE4100 only supports a single NAND device.\n");
			BUG();
		}
	}
	dev_dbg(denali->dev,
		"denali->total_used_banks: %d\n", denali->total_used_banks);
}
/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->flash_reg + FEATURES);
	/*
	 * Read the revision register, so we can calculate the max_banks
	 * properly: the encoding changed from rev 5.0 to 5.1
	 */
	u32 revision = MAKE_COMPARABLE_REVISION(
				ioread32(denali->flash_reg + REVISION));

	/* pre-5.1: N_BANKS encodes banks/2; 5.1+: encodes banks directly
	 * as a power of two */
	if (revision < REVISION_5_1)
		denali->max_banks = 2 << (features & FEATURES__N_BANKS);
	else
		denali->max_banks = 1 << (features & FEATURES__N_BANKS);
}
/*
 * Detect the attached NAND via an explicit READ ID cycle and program the
 * controller timing registers accordingly (ONFI when advertised,
 * otherwise vendor-specific quirks for Samsung/Toshiba/Hynix).
 * Returns FAIL only when an ONFI device reports no usable timing mode.
 */
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
	uint16_t status = PASS;
	uint32_t id_bytes[8], addr;
	uint8_t maf_id, device_id;
	int i;

	/*
	 * Use read id method to get device ID and other params.
	 * For some NAND chips, controller can't report the correct
	 * device ID by reading from DEVICE_ID register
	 */
	addr = MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, addr | 0, 0x90);
	index_addr(denali, addr | 1, 0);
	for (i = 0; i < 8; i++)
		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
	maf_id = id_bytes[0];
	device_id = id_bytes[1];

	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para(denali))
			return FAIL;
	} else if (maf_id == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali, device_id);
	} else if (maf_id == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (maf_id == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali, device_id);
	}

	dev_info(denali->dev,
		 "Dump timing register values:\n"
		 "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
		 "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
		 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
		 ioread32(denali->flash_reg + ACC_CLKS),
		 ioread32(denali->flash_reg + RE_2_WE),
		 ioread32(denali->flash_reg + RE_2_RE),
		 ioread32(denali->flash_reg + WE_2_RE),
		 ioread32(denali->flash_reg + ADDR_2_DATA),
		 ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
		 ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
		 ioread32(denali->flash_reg + CS_SETUP_CNT));

	find_valid_banks(denali);

	/*
	 * If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
		nand_onfi_timing_set(denali, onfi_timing_mode);

	return status;
}
  452. static void denali_set_intr_modes(struct denali_nand_info *denali,
  453. uint16_t INT_ENABLE)
  454. {
  455. if (INT_ENABLE)
  456. iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
  457. else
  458. iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
  459. }
  460. /*
  461. * validation function to verify that the controlling software is making
  462. * a valid request
  463. */
  464. static inline bool is_flash_bank_valid(int flash_bank)
  465. {
  466. return flash_bank >= 0 && flash_bank < 4;
  467. }
/*
 * Bring the interrupt machinery to a known state: globally disable,
 * ack every pending per-bank status bit, then enable the subset of
 * interrupt sources this driver handles (DENALI_IRQ_ALL).
 */
static void denali_irq_init(struct denali_nand_info *denali)
{
	uint32_t int_mask;
	int i;

	/* Disable global interrupts */
	denali_set_intr_modes(denali, false);

	int_mask = DENALI_IRQ_ALL;

	/* Clear all status bits */
	for (i = 0; i < denali->max_banks; ++i)
		iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));

	denali_irq_enable(denali, int_mask);
}
/*
 * Teardown counterpart of denali_irq_init(): just disables the global
 * interrupt gate.  The irqnum argument is currently unused.
 */
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
	denali_set_intr_modes(denali, false);
}
/* Program the same interrupt-enable mask into every bank's INTR_EN register. */
static void denali_irq_enable(struct denali_nand_info *denali,
			      uint32_t int_mask)
{
	int i;

	for (i = 0; i < denali->max_banks; ++i)
		iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
}
/*
 * This function only returns when an interrupt that this driver cares about
 * occurs. This is to reduce the overhead of servicing interrupts
 *
 * (It samples the current bank's status register and masks it down to the
 * sources in DENALI_IRQ_ALL; a zero result means "not ours".)
 */
static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
{
	return read_interrupt_status(denali) & DENALI_IRQ_ALL;
}
  499. /* Interrupts are cleared by writing a 1 to the appropriate status bit */
  500. static inline void clear_interrupt(struct denali_nand_info *denali,
  501. uint32_t irq_mask)
  502. {
  503. uint32_t intr_status_reg;
  504. intr_status_reg = INTR_STATUS(denali->flash_bank);
  505. iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
  506. }
/*
 * Atomically (w.r.t. the ISR) ack every pending interrupt for the current
 * bank and zero the software-visible irq_status latch.  The irq_lock
 * keeps this read-ack-reset sequence from racing with denali_isr().
 */
static void clear_interrupts(struct denali_nand_info *denali)
{
	uint32_t status;

	spin_lock_irq(&denali->irq_lock);

	status = read_interrupt_status(denali);
	clear_interrupt(denali, status);

	denali->irq_status = 0x0;
	spin_unlock_irq(&denali->irq_lock);
}
  516. static uint32_t read_interrupt_status(struct denali_nand_info *denali)
  517. {
  518. uint32_t intr_status_reg;
  519. intr_status_reg = INTR_STATUS(denali->flash_bank);
  520. return ioread32(denali->flash_reg + intr_status_reg);
  521. }
/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device. Note that on CE4100, this is a shared interrupt.
 *
 * Acks the hardware bits, latches them into denali->irq_status for
 * wait_for_irq() to consume, and signals the completion.  Returns
 * IRQ_HANDLED only when the controller actually raised one of our
 * interrupt sources.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	uint32_t irq_status;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&denali->irq_lock);

	/* check to see if a valid NAND chip has been selected. */
	if (is_flash_bank_valid(denali->flash_bank)) {
		/*
		 * check to see if controller generated the interrupt,
		 * since this is a shared interrupt
		 */
		irq_status = denali_irq_detected(denali);
		if (irq_status != 0) {
			/* handle interrupt */
			/* first acknowledge it */
			clear_interrupt(denali, irq_status);
			/*
			 * store the status in the device context for someone
			 * to read
			 */
			denali->irq_status |= irq_status;
			/* notify anyone who cares that it happened */
			complete(&denali->complete);
			/* tell the OS that we've handled this */
			result = IRQ_HANDLED;
		}
	}
	spin_unlock(&denali->irq_lock);
	return result;
}
  557. #define BANK(x) ((x) << 24)
  558. static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
  559. {
  560. unsigned long comp_res;
  561. uint32_t intr_status;
  562. unsigned long timeout = msecs_to_jiffies(1000);
  563. do {
  564. comp_res =
  565. wait_for_completion_timeout(&denali->complete, timeout);
  566. spin_lock_irq(&denali->irq_lock);
  567. intr_status = denali->irq_status;
  568. if (intr_status & irq_mask) {
  569. denali->irq_status &= ~irq_mask;
  570. spin_unlock_irq(&denali->irq_lock);
  571. /* our interrupt was detected */
  572. break;
  573. }
  574. /*
  575. * these are not the interrupts you are looking for -
  576. * need to wait again
  577. */
  578. spin_unlock_irq(&denali->irq_lock);
  579. } while (comp_res != 0);
  580. if (comp_res == 0) {
  581. /* timeout */
  582. pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
  583. intr_status, irq_mask);
  584. intr_status = 0;
  585. }
  586. return intr_status;
  587. }
  588. /*
  589. * This helper function setups the registers for ECC and whether or not
  590. * the spare area will be transferred.
  591. */
  592. static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
  593. bool transfer_spare)
  594. {
  595. int ecc_en_flag, transfer_spare_flag;
  596. /* set ECC, transfer spare bits if needed */
  597. ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
  598. transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
  599. /* Enable spare area/ECC per user's request. */
  600. iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
  601. iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
  602. }
/*
 * sends a pipeline command operation to the controller. See the Denali NAND
 * controller's user guide for more information (section 4.2.3.6).
 *
 * Returns PASS on success, FAIL if a read pipeline command was not
 * accepted before the interrupt wait timed out.
 */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
				    bool ecc_en, bool transfer_spare,
				    int access_type, int op)
{
	int status = PASS;
	uint32_t page_count = 1;
	uint32_t addr, cmd, irq_status, irq_mask;

	if (op == DENALI_READ)
		irq_mask = INTR_STATUS__LOAD_COMP;
	else if (op == DENALI_WRITE)
		irq_mask = 0;
	else
		BUG();

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

	clear_interrupts(denali);

	addr = BANK(denali->flash_bank) | denali->page;

	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
		/* write spare area: select the access type first,
		 * then switch to data mode */
		cmd = MODE_10 | addr;
		index_addr(denali, cmd, access_type);

		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	} else if (op == DENALI_READ) {
		/* setup page read request for access type */
		cmd = MODE_10 | addr;
		index_addr(denali, cmd, access_type);

		/*
		 * page 33 of the NAND controller spec indicates we should not
		 * use the pipeline commands in Spare area only mode.
		 * So we don't.
		 */
		if (access_type == SPARE_ACCESS) {
			cmd = MODE_01 | addr;
			iowrite32(cmd, denali->flash_mem);
		} else {
			index_addr(denali, cmd,
				   PIPELINE_ACCESS | op | page_count);

			/*
			 * wait for command to be accepted
			 * can always use status0 bit as the
			 * mask is identical for each bank.
			 */
			irq_status = wait_for_irq(denali, irq_mask);

			if (irq_status == 0) {
				dev_err(denali->dev,
					"cmd, page, addr on timeout (0x%x, 0x%x, 0x%x)\n",
					cmd, denali->page, addr);
				status = FAIL;
			} else {
				cmd = MODE_01 | addr;
				iowrite32(cmd, denali->flash_mem);
			}
		}
	}
	return status;
}
/*
 * helper function that simply writes a buffer to the flash.
 * Returns the number of bytes written (always == len, since len is
 * asserted to be a multiple of 4).
 */
static int write_data_to_flash_mem(struct denali_nand_info *denali,
				   const uint8_t *buf, int len)
{
	uint32_t *buf32;
	int i;

	/*
	 * verify that the len is a multiple of 4.
	 * see comment in read_data_from_flash_mem()
	 */
	BUG_ON((len % 4) != 0);

	/* write the data to the flash memory */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
		iowrite32(*buf32++, denali->flash_mem + 0x10);
	return i * 4; /* intent is to return the number of bytes written */
}
/*
 * helper function that simply reads a buffer from the flash.
 * Returns the number of bytes read (always == len).
 */
static int read_data_from_flash_mem(struct denali_nand_info *denali,
				    uint8_t *buf, int len)
{
	uint32_t *buf32;
	int i;

	/*
	 * we assume that len will be a multiple of 4, if not it would be nice
	 * to know about it ASAP rather than have random failures...
	 * This assumption is based on the fact that this function is designed
	 * to be used to read flash pages, which are typically multiples of 4.
	 */
	BUG_ON((len % 4) != 0);

	/* transfer the data from the flash */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
		*buf32++ = ioread32(denali->flash_mem + 0x10);
	return i * 4; /* intent is to return the number of bytes read */
}
  702. /* writes OOB data to the device */
  703. static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
  704. {
  705. struct denali_nand_info *denali = mtd_to_denali(mtd);
  706. uint32_t irq_status;
  707. uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
  708. INTR_STATUS__PROGRAM_FAIL;
  709. int status = 0;
  710. denali->page = page;
  711. if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
  712. DENALI_WRITE) == PASS) {
  713. write_data_to_flash_mem(denali, buf, mtd->oobsize);
  714. /* wait for operation to complete */
  715. irq_status = wait_for_irq(denali, irq_mask);
  716. if (irq_status == 0) {
  717. dev_err(denali->dev, "OOB write failed\n");
  718. status = -EIO;
  719. }
  720. } else {
  721. dev_err(denali->dev, "unable to send pipeline command\n");
  722. status = -EIO;
  723. }
  724. return status;
  725. }
/*
 * reads OOB data from the device into @buf (mtd->oobsize bytes).
 * Errors are only logged: this helper returns void, so the caller
 * cannot be told about a timeout.
 */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS__LOAD_COMP;
	uint32_t irq_status, addr, cmd;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
				     DENALI_READ) == PASS) {
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		/*
		 * wait for command to be accepted
		 * can always use status0 bit as the
		 * mask is identical for each bank.
		 */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
			dev_err(denali->dev, "page on OOB timeout %d\n",
				denali->page);

		/*
		 * We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, cmd, MAIN_ACCESS);
	}
}
  757. /*
  758. * this function examines buffers to see if they contain data that
  759. * indicate that the buffer is part of an erased region of flash.
  760. */
  761. static bool is_erased(uint8_t *buf, int len)
  762. {
  763. int i;
  764. for (i = 0; i < len; i++)
  765. if (buf[i] != 0xFF)
  766. return false;
  767. return true;
  768. }
/* size of one ECC sector (codeword) handled by the controller, in bytes */
#define ECC_SECTOR_SIZE 512
/* decode fields of the ECC_ERROR_ADDRESS register */
#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
/* decode fields of the ERR_CORRECTION_INFO register */
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
/*
 * Walk the controller's ECC error log after a page read.
 *
 * Each correctable error is fixed up directly in @buf and counted in
 * mtd->ecc_stats.corrected; an uncorrectable error only requests an
 * erased-page check from the caller, because an erased page also fails
 * ECC without being a real error.
 *
 * Returns true if the caller should test whether the page is erased.
 * *max_bitflips receives the number of bitflips corrected (0 when the
 * ECC_ERR interrupt was not asserted).
 */
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
		       uint32_t irq_status, unsigned int *max_bitflips)
{
	bool check_erased_page = false;
	unsigned int bitflips = 0;

	if (irq_status & INTR_STATUS__ECC_ERR) {
		/* read the ECC errors. we'll ignore them for now */
		uint32_t err_address, err_correction_info, err_byte,
			 err_sector, err_device, err_correction_value;

		/* mask interrupts while polling the error log registers */
		denali_set_intr_modes(denali, false);

		do {
			err_address = ioread32(denali->flash_reg +
					       ECC_ERROR_ADDRESS);
			err_sector = ECC_SECTOR(err_address);
			err_byte = ECC_BYTE(err_address);

			err_correction_info = ioread32(denali->flash_reg +
						       ERR_CORRECTION_INFO);
			err_correction_value =
				ECC_CORRECTION_VALUE(err_correction_info);
			err_device = ECC_ERR_DEVICE(err_correction_info);

			if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
				/*
				 * If err_byte is larger than ECC_SECTOR_SIZE,
				 * the error happened in the OOB area, so we
				 * ignore it — no need to correct it.
				 * err_device identifies which NAND device the
				 * error bits happened in, when more than one
				 * NAND is connected.
				 */
				if (err_byte < ECC_SECTOR_SIZE) {
					struct mtd_info *mtd =
						nand_to_mtd(&denali->nand);
					int offset;

					/*
					 * map (sector, byte, device) back to
					 * the flat interleaved buffer layout
					 */
					offset = (err_sector *
							ECC_SECTOR_SIZE +
							err_byte) *
							denali->devnum +
							err_device;

					/* correct the ECC error */
					buf[offset] ^= err_correction_value;
					mtd->ecc_stats.corrected++;
					bitflips++;
				}
			} else {
				/*
				 * if the error is not correctable, need to
				 * look at the page to see if it is an erased
				 * page. if so, then it's not a real ECC error
				 */
				check_erased_page = true;
			}
		} while (!ECC_LAST_ERR(err_correction_info));

		/*
		 * Once all ECC errors are handled, the controller will
		 * trigger an ECC_TRANSACTION_DONE interrupt; busy-wait for
		 * it here (interrupts are still masked).
		 */
		while (!(read_interrupt_status(denali) &
			 INTR_STATUS__ECC_TRANSACTION_DONE))
			cpu_relax();
		clear_interrupts(denali);
		denali_set_intr_modes(denali, true);
	}
	*max_bitflips = bitflips;
	return check_erased_page;
}
  842. /* programs the controller to either enable/disable DMA transfers */
  843. static void denali_enable_dma(struct denali_nand_info *denali, bool en)
  844. {
  845. iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
  846. ioread32(denali->flash_reg + DMA_ENABLE);
  847. }
/*
 * setups the HW to perform the data DMA: issues the controller's
 * four-step MODE_10 index/data sequence describing the page, the host
 * buffer address and the completion behaviour, then the transfer starts.
 */
static void denali_setup_dma(struct denali_nand_info *denali, int op)
{
	uint32_t mode;
	const int page_count = 1;
	/*
	 * NOTE(review): dma_buf (dma_addr_t) is truncated to 32 bits here;
	 * this relies on the DMA_BIT_MASK(32) set during denali_init().
	 */
	uint32_t addr = denali->buf.dma_buf;

	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	index_addr(denali, mode | 0x14000, 0x2400);
}
/*
 * writes a page. user specifies type, and this function handles the
 * configuration details.
 *
 * @raw_xfer: false = let the hardware generate ECC;
 *            true  = disable ECC and also transfer chip->oob_poi verbatim.
 *
 * Always returns 0; a timeout is recorded in denali->status so that
 * denali_waitfunc() reports NAND_STATUS_FAIL to the NAND core.
 */
static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
		      const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_status;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
			    INTR_STATUS__PROGRAM_FAIL;

	/*
	 * if it is a raw xfer, we want to disable ecc and send the spare area.
	 * !raw_xfer - enable ecc
	 * raw_xfer - transfer spare
	 */
	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer) {
		/* transfer the data to the spare area */
		memcpy(denali->buf.buf + mtd->writesize,
		       chip->oob_poi,
		       mtd->oobsize);
	}

	/* hand the buffer over to the device before starting the DMA */
	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0) {
		/*
		 * NOTE(review): only a timeout (irq_status == 0) is flagged;
		 * a PROGRAM_FAIL interrupt still returns success here —
		 * confirm whether that is intentional.
		 */
		dev_err(denali->dev, "timeout on write_page (type = %d)\n",
			raw_xfer);
		denali->status = NAND_STATUS_FAIL;
	}

	denali_enable_dma(denali, false);
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);

	return 0;
}
  907. /* NAND core entry points */
  908. /*
  909. * this is the callback that the NAND core calls to write a page. Since
  910. * writing a page with ECC or without is similar, all the work is done
  911. * by write_page above.
  912. */
  913. static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  914. const uint8_t *buf, int oob_required, int page)
  915. {
  916. /*
  917. * for regular page writes, we let HW handle all the ECC
  918. * data written to the device.
  919. */
  920. return write_page(mtd, chip, buf, false);
  921. }
  922. /*
  923. * This is the callback that the NAND core calls to write a page without ECC.
  924. * raw access is similar to ECC page writes, so all the work is done in the
  925. * write_page() function above.
  926. */
  927. static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  928. const uint8_t *buf, int oob_required,
  929. int page)
  930. {
  931. /*
  932. * for raw page writes, we want to disable ECC and simply write
  933. * whatever data is in the buffer.
  934. */
  935. return write_page(mtd, chip, buf, true);
  936. }
  937. static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
  938. int page)
  939. {
  940. return write_oob_data(mtd, chip->oob_poi, page);
  941. }
  942. static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
  943. int page)
  944. {
  945. read_oob_data(mtd, chip->oob_poi, page);
  946. return 0;
  947. }
  948. static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
  949. uint8_t *buf, int oob_required, int page)
  950. {
  951. unsigned int max_bitflips;
  952. struct denali_nand_info *denali = mtd_to_denali(mtd);
  953. dma_addr_t addr = denali->buf.dma_buf;
  954. size_t size = mtd->writesize + mtd->oobsize;
  955. uint32_t irq_status;
  956. uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
  957. INTR_STATUS__ECC_ERR;
  958. bool check_erased_page = false;
  959. if (page != denali->page) {
  960. dev_err(denali->dev,
  961. "IN %s: page %d is not equal to denali->page %d",
  962. __func__, page, denali->page);
  963. BUG();
  964. }
  965. setup_ecc_for_xfer(denali, true, false);
  966. denali_enable_dma(denali, true);
  967. dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
  968. clear_interrupts(denali);
  969. denali_setup_dma(denali, DENALI_READ);
  970. /* wait for operation to complete */
  971. irq_status = wait_for_irq(denali, irq_mask);
  972. dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
  973. memcpy(buf, denali->buf.buf, mtd->writesize);
  974. check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
  975. denali_enable_dma(denali, false);
  976. if (check_erased_page) {
  977. read_oob_data(mtd, chip->oob_poi, denali->page);
  978. /* check ECC failures that may have occurred on erased pages */
  979. if (check_erased_page) {
  980. if (!is_erased(buf, mtd->writesize))
  981. mtd->ecc_stats.failed++;
  982. if (!is_erased(buf, mtd->oobsize))
  983. mtd->ecc_stats.failed++;
  984. }
  985. }
  986. return max_bitflips;
  987. }
  988. static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  989. uint8_t *buf, int oob_required, int page)
  990. {
  991. struct denali_nand_info *denali = mtd_to_denali(mtd);
  992. dma_addr_t addr = denali->buf.dma_buf;
  993. size_t size = mtd->writesize + mtd->oobsize;
  994. uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
  995. if (page != denali->page) {
  996. dev_err(denali->dev,
  997. "IN %s: page %d is not equal to denali->page %d",
  998. __func__, page, denali->page);
  999. BUG();
  1000. }
  1001. setup_ecc_for_xfer(denali, false, true);
  1002. denali_enable_dma(denali, true);
  1003. dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
  1004. clear_interrupts(denali);
  1005. denali_setup_dma(denali, DENALI_READ);
  1006. /* wait for operation to complete */
  1007. wait_for_irq(denali, irq_mask);
  1008. dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
  1009. denali_enable_dma(denali, false);
  1010. memcpy(buf, denali->buf.buf, mtd->writesize);
  1011. memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
  1012. return 0;
  1013. }
  1014. static uint8_t denali_read_byte(struct mtd_info *mtd)
  1015. {
  1016. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1017. uint8_t result = 0xff;
  1018. if (denali->buf.head < denali->buf.tail)
  1019. result = denali->buf.buf[denali->buf.head++];
  1020. return result;
  1021. }
  1022. static void denali_select_chip(struct mtd_info *mtd, int chip)
  1023. {
  1024. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1025. spin_lock_irq(&denali->irq_lock);
  1026. denali->flash_bank = chip;
  1027. spin_unlock_irq(&denali->irq_lock);
  1028. }
  1029. static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
  1030. {
  1031. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1032. int status = denali->status;
  1033. denali->status = 0;
  1034. return status;
  1035. }
/*
 * NAND core callback: erase the block containing @page.
 * Returns PASS on success, NAND_STATUS_FAIL if the controller reports
 * an erase failure.
 */
static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t cmd, irq_status;

	clear_interrupts(denali);

	/* issue an erase (sub-command 0x1) for this bank/page in MODE_10 */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
				  INTR_STATUS__ERASE_FAIL);

	return irq_status & INTR_STATUS__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
}
/*
 * NAND core callback: execute a NAND command. Only the commands the
 * NAND core actually issues against this driver are implemented; any
 * other command is logged and ignored.
 */
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr, id;
	int i;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		/* the actual program happens in the write_page callbacks */
		break;
	case NAND_CMD_STATUS:
		read_status(denali);
		break;
	case NAND_CMD_READID:
	case NAND_CMD_PARAM:
		reset_buf(denali);
		/*
		 * sometimes ManufactureId read from register is not right
		 * e.g. some of Micron MT29F32G08QAA MLC NAND chips
		 * So here we send the READID cmd (0x90) to the NAND instead
		 */
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, 0x90);
		index_addr(denali, addr | 1, col);
		/* buffer 8 ID bytes for denali_read_byte() to hand out */
		for (i = 0; i < 8; i++) {
			index_addr_read_data(denali, addr | 2, &id);
			write_byte_to_buf(denali, id);
		}
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		/* just latch the page; the page read/write callbacks use it */
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	default:
		pr_err(": unsupported command received 0x%x\n", cmd);
		break;
	}
}
/* end NAND core entry points */

/*
 * Initialization code to bring the device up to a known good state:
 * read firmware-provided settings, reset the controller, enable the
 * ready/busy pins and ECC, then program timings and interrupt state.
 */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * tell driver how many bit controller will skip before
	 * writing ECC code in OOB, this register may be already
	 * set by firmware. So we read this value out.
	 * if this value is 0, just let it be.
	 */
	denali->bbtskipbytes = ioread32(denali->flash_reg +
					SPARE_AREA_SKIP_BYTES);
	detect_max_banks(denali);
	denali_nand_reset(denali);
	/* enable all four ready/busy pins */
	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG,
		  denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(1, denali->flash_reg + ECC_ENABLE);
	denali_nand_timing_set(denali);
	denali_irq_init(denali);
}
/*
 * Although the controller spec says SLC ECC is forced to be 4-bit,
 * the Denali controller in MRST only supports 15-bit and 8-bit ECC
 * correction.
 */
#define ECC_8BITS 14	/* ECC bytes per 512-byte sector, 8-bit correction */
#define ECC_15BITS 26	/* ECC bytes per 512-byte sector, 15-bit correction */
  1123. static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
  1124. struct mtd_oob_region *oobregion)
  1125. {
  1126. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1127. struct nand_chip *chip = mtd_to_nand(mtd);
  1128. if (section)
  1129. return -ERANGE;
  1130. oobregion->offset = denali->bbtskipbytes;
  1131. oobregion->length = chip->ecc.total;
  1132. return 0;
  1133. }
  1134. static int denali_ooblayout_free(struct mtd_info *mtd, int section,
  1135. struct mtd_oob_region *oobregion)
  1136. {
  1137. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1138. struct nand_chip *chip = mtd_to_nand(mtd);
  1139. if (section)
  1140. return -ERANGE;
  1141. oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
  1142. oobregion->length = mtd->oobsize - oobregion->offset;
  1143. return 0;
  1144. }
/* OOB layout: [bbtskipbytes][ECC bytes][free bytes] */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
/* on-flash bad-block-table signatures: "Bbt0" and its mirror "1tbB" */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/* primary BBT descriptor: stored in the last blocks of each chip */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};
/* mirror BBT descriptor: same placement rules, mirrored signature */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
  1169. /* initialize driver data structures */
  1170. static void denali_drv_init(struct denali_nand_info *denali)
  1171. {
  1172. /*
  1173. * the completion object will be used to notify
  1174. * the callee that the interrupt is done
  1175. */
  1176. init_completion(&denali->complete);
  1177. /*
  1178. * the spinlock will be used to synchronize the ISR with any
  1179. * element that might be access shared data (interrupt status)
  1180. */
  1181. spin_lock_init(&denali->irq_lock);
  1182. /* indicate that MTD has not selected a valid bank yet */
  1183. denali->flash_bank = CHIP_SELECT_INVALID;
  1184. /* initialize our irq_status variable to indicate no interrupts */
  1185. denali->irq_status = 0;
  1186. }
  1187. int denali_init(struct denali_nand_info *denali)
  1188. {
  1189. struct mtd_info *mtd = nand_to_mtd(&denali->nand);
  1190. int ret;
  1191. if (denali->platform == INTEL_CE4100) {
  1192. /*
  1193. * Due to a silicon limitation, we can only support
  1194. * ONFI timing mode 1 and below.
  1195. */
  1196. if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
  1197. pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
  1198. return -EINVAL;
  1199. }
  1200. }
  1201. /* allocate a temporary buffer for nand_scan_ident() */
  1202. denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
  1203. GFP_DMA | GFP_KERNEL);
  1204. if (!denali->buf.buf)
  1205. return -ENOMEM;
  1206. mtd->dev.parent = denali->dev;
  1207. denali_hw_init(denali);
  1208. denali_drv_init(denali);
  1209. /* Request IRQ after all the hardware initialization is finished */
  1210. ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
  1211. IRQF_SHARED, DENALI_NAND_NAME, denali);
  1212. if (ret) {
  1213. dev_err(denali->dev, "Unable to request IRQ\n");
  1214. return ret;
  1215. }
  1216. /* now that our ISR is registered, we can enable interrupts */
  1217. denali_set_intr_modes(denali, true);
  1218. mtd->name = "denali-nand";
  1219. /* register the driver with the NAND core subsystem */
  1220. denali->nand.select_chip = denali_select_chip;
  1221. denali->nand.cmdfunc = denali_cmdfunc;
  1222. denali->nand.read_byte = denali_read_byte;
  1223. denali->nand.waitfunc = denali_waitfunc;
  1224. /*
  1225. * scan for NAND devices attached to the controller
  1226. * this is the first stage in a two step process to register
  1227. * with the nand subsystem
  1228. */
  1229. ret = nand_scan_ident(mtd, denali->max_banks, NULL);
  1230. if (ret)
  1231. goto failed_req_irq;
  1232. /* allocate the right size buffer now */
  1233. devm_kfree(denali->dev, denali->buf.buf);
  1234. denali->buf.buf = devm_kzalloc(denali->dev,
  1235. mtd->writesize + mtd->oobsize,
  1236. GFP_KERNEL);
  1237. if (!denali->buf.buf) {
  1238. ret = -ENOMEM;
  1239. goto failed_req_irq;
  1240. }
  1241. /* Is 32-bit DMA supported? */
  1242. ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
  1243. if (ret) {
  1244. dev_err(denali->dev, "No usable DMA configuration\n");
  1245. goto failed_req_irq;
  1246. }
  1247. denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
  1248. mtd->writesize + mtd->oobsize,
  1249. DMA_BIDIRECTIONAL);
  1250. if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
  1251. dev_err(denali->dev, "Failed to map DMA buffer\n");
  1252. ret = -EIO;
  1253. goto failed_req_irq;
  1254. }
  1255. /*
  1256. * support for multi nand
  1257. * MTD known nothing about multi nand, so we should tell it
  1258. * the real pagesize and anything necessery
  1259. */
  1260. denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
  1261. denali->nand.chipsize <<= denali->devnum - 1;
  1262. denali->nand.page_shift += denali->devnum - 1;
  1263. denali->nand.pagemask = (denali->nand.chipsize >>
  1264. denali->nand.page_shift) - 1;
  1265. denali->nand.bbt_erase_shift += denali->devnum - 1;
  1266. denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
  1267. denali->nand.chip_shift += denali->devnum - 1;
  1268. mtd->writesize <<= denali->devnum - 1;
  1269. mtd->oobsize <<= denali->devnum - 1;
  1270. mtd->erasesize <<= denali->devnum - 1;
  1271. mtd->size = denali->nand.numchips * denali->nand.chipsize;
  1272. denali->bbtskipbytes *= denali->devnum;
  1273. /*
  1274. * second stage of the NAND scan
  1275. * this stage requires information regarding ECC and
  1276. * bad block management.
  1277. */
  1278. /* Bad block management */
  1279. denali->nand.bbt_td = &bbt_main_descr;
  1280. denali->nand.bbt_md = &bbt_mirror_descr;
  1281. /* skip the scan for now until we have OOB read and write support */
  1282. denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
  1283. denali->nand.options |= NAND_SKIP_BBTSCAN;
  1284. denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
  1285. /* no subpage writes on denali */
  1286. denali->nand.options |= NAND_NO_SUBPAGE_WRITE;
  1287. /*
  1288. * Denali Controller only support 15bit and 8bit ECC in MRST,
  1289. * so just let controller do 15bit ECC for MLC and 8bit ECC for
  1290. * SLC if possible.
  1291. * */
  1292. if (!nand_is_slc(&denali->nand) &&
  1293. (mtd->oobsize > (denali->bbtskipbytes +
  1294. ECC_15BITS * (mtd->writesize /
  1295. ECC_SECTOR_SIZE)))) {
  1296. /* if MLC OOB size is large enough, use 15bit ECC*/
  1297. denali->nand.ecc.strength = 15;
  1298. denali->nand.ecc.bytes = ECC_15BITS;
  1299. iowrite32(15, denali->flash_reg + ECC_CORRECTION);
  1300. } else if (mtd->oobsize < (denali->bbtskipbytes +
  1301. ECC_8BITS * (mtd->writesize /
  1302. ECC_SECTOR_SIZE))) {
  1303. pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes");
  1304. goto failed_req_irq;
  1305. } else {
  1306. denali->nand.ecc.strength = 8;
  1307. denali->nand.ecc.bytes = ECC_8BITS;
  1308. iowrite32(8, denali->flash_reg + ECC_CORRECTION);
  1309. }
  1310. mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
  1311. denali->nand.ecc.bytes *= denali->devnum;
  1312. denali->nand.ecc.strength *= denali->devnum;
  1313. /* override the default read operations */
  1314. denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
  1315. denali->nand.ecc.read_page = denali_read_page;
  1316. denali->nand.ecc.read_page_raw = denali_read_page_raw;
  1317. denali->nand.ecc.write_page = denali_write_page;
  1318. denali->nand.ecc.write_page_raw = denali_write_page_raw;
  1319. denali->nand.ecc.read_oob = denali_read_oob;
  1320. denali->nand.ecc.write_oob = denali_write_oob;
  1321. denali->nand.erase = denali_erase;
  1322. ret = nand_scan_tail(mtd);
  1323. if (ret)
  1324. goto failed_req_irq;
  1325. ret = mtd_device_register(mtd, NULL, 0);
  1326. if (ret) {
  1327. dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
  1328. goto failed_req_irq;
  1329. }
  1330. return 0;
  1331. failed_req_irq:
  1332. denali_irq_cleanup(denali->irq, denali);
  1333. return ret;
  1334. }
  1335. EXPORT_SYMBOL(denali_init);
  1336. /* driver exit point */
  1337. void denali_remove(struct denali_nand_info *denali)
  1338. {
  1339. struct mtd_info *mtd = nand_to_mtd(&denali->nand);
  1340. /*
  1341. * Pre-compute DMA buffer size to avoid any problems in case
  1342. * nand_release() ever changes in a way that mtd->writesize and
  1343. * mtd->oobsize are not reliable after this call.
  1344. */
  1345. int bufsize = mtd->writesize + mtd->oobsize;
  1346. nand_release(mtd);
  1347. denali_irq_cleanup(denali->irq, denali);
  1348. dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
  1349. DMA_BIDIRECTIONAL);
  1350. }
  1351. EXPORT_SYMBOL(denali_remove);