qcom_nandc.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208
  1. /*
  2. * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  3. *
  4. * This software is licensed under the terms of the GNU General Public
  5. * License version 2, as published by the Free Software Foundation, and
  6. * may be copied, distributed, and modified under those terms.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/slab.h>
  15. #include <linux/bitops.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/dmaengine.h>
  18. #include <linux/module.h>
  19. #include <linux/mtd/nand.h>
  20. #include <linux/mtd/partitions.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/delay.h>
  24. /* NANDc reg offsets */
  25. #define NAND_FLASH_CMD 0x00
  26. #define NAND_ADDR0 0x04
  27. #define NAND_ADDR1 0x08
  28. #define NAND_FLASH_CHIP_SELECT 0x0c
  29. #define NAND_EXEC_CMD 0x10
  30. #define NAND_FLASH_STATUS 0x14
  31. #define NAND_BUFFER_STATUS 0x18
  32. #define NAND_DEV0_CFG0 0x20
  33. #define NAND_DEV0_CFG1 0x24
  34. #define NAND_DEV0_ECC_CFG 0x28
  35. #define NAND_DEV1_ECC_CFG 0x2c
  36. #define NAND_DEV1_CFG0 0x30
  37. #define NAND_DEV1_CFG1 0x34
  38. #define NAND_READ_ID 0x40
  39. #define NAND_READ_STATUS 0x44
  40. #define NAND_DEV_CMD0 0xa0
  41. #define NAND_DEV_CMD1 0xa4
  42. #define NAND_DEV_CMD2 0xa8
  43. #define NAND_DEV_CMD_VLD 0xac
  44. #define SFLASHC_BURST_CFG 0xe0
  45. #define NAND_ERASED_CW_DETECT_CFG 0xe8
  46. #define NAND_ERASED_CW_DETECT_STATUS 0xec
  47. #define NAND_EBI2_ECC_BUF_CFG 0xf0
  48. #define FLASH_BUF_ACC 0x100
  49. #define NAND_CTRL 0xf00
  50. #define NAND_VERSION 0xf08
  51. #define NAND_READ_LOCATION_0 0xf20
  52. #define NAND_READ_LOCATION_1 0xf24
  53. /* dummy register offsets, used by write_reg_dma */
  54. #define NAND_DEV_CMD1_RESTORE 0xdead
  55. #define NAND_DEV_CMD_VLD_RESTORE 0xbeef
  56. /* NAND_FLASH_CMD bits */
  57. #define PAGE_ACC BIT(4)
  58. #define LAST_PAGE BIT(5)
  59. /* NAND_FLASH_CHIP_SELECT bits */
  60. #define NAND_DEV_SEL 0
  61. #define DM_EN BIT(2)
  62. /* NAND_FLASH_STATUS bits */
  63. #define FS_OP_ERR BIT(4)
  64. #define FS_READY_BSY_N BIT(5)
  65. #define FS_MPU_ERR BIT(8)
  66. #define FS_DEVICE_STS_ERR BIT(16)
  67. #define FS_DEVICE_WP BIT(23)
  68. /* NAND_BUFFER_STATUS bits */
  69. #define BS_UNCORRECTABLE_BIT BIT(8)
  70. #define BS_CORRECTABLE_ERR_MSK 0x1f
  71. /* NAND_DEVn_CFG0 bits */
  72. #define DISABLE_STATUS_AFTER_WRITE 4
  73. #define CW_PER_PAGE 6
  74. #define UD_SIZE_BYTES 9
  75. #define ECC_PARITY_SIZE_BYTES_RS 19
  76. #define SPARE_SIZE_BYTES 23
  77. #define NUM_ADDR_CYCLES 27
  78. #define STATUS_BFR_READ 30
  79. #define SET_RD_MODE_AFTER_STATUS 31
/* NAND_DEVn_CFG1 bits */
  81. #define DEV0_CFG1_ECC_DISABLE 0
  82. #define WIDE_FLASH 1
  83. #define NAND_RECOVERY_CYCLES 2
  84. #define CS_ACTIVE_BSY 5
  85. #define BAD_BLOCK_BYTE_NUM 6
  86. #define BAD_BLOCK_IN_SPARE_AREA 16
  87. #define WR_RD_BSY_GAP 17
  88. #define ENABLE_BCH_ECC 27
  89. /* NAND_DEV0_ECC_CFG bits */
  90. #define ECC_CFG_ECC_DISABLE 0
  91. #define ECC_SW_RESET 1
  92. #define ECC_MODE 4
  93. #define ECC_PARITY_SIZE_BYTES_BCH 8
  94. #define ECC_NUM_DATA_BYTES 16
  95. #define ECC_FORCE_CLK_OPEN 30
  96. /* NAND_DEV_CMD1 bits */
  97. #define READ_ADDR 0
  98. /* NAND_DEV_CMD_VLD bits */
  99. #define READ_START_VLD 0
  100. /* NAND_EBI2_ECC_BUF_CFG bits */
  101. #define NUM_STEPS 0
  102. /* NAND_ERASED_CW_DETECT_CFG bits */
  103. #define ERASED_CW_ECC_MASK 1
  104. #define AUTO_DETECT_RES 0
  105. #define MASK_ECC (1 << ERASED_CW_ECC_MASK)
  106. #define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
  107. #define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
  108. #define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
  109. #define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
  110. /* NAND_ERASED_CW_DETECT_STATUS bits */
  111. #define PAGE_ALL_ERASED BIT(7)
  112. #define CODEWORD_ALL_ERASED BIT(6)
  113. #define PAGE_ERASED BIT(5)
  114. #define CODEWORD_ERASED BIT(4)
  115. #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
  116. #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
  117. /* Version Mask */
  118. #define NAND_VERSION_MAJOR_MASK 0xf0000000
  119. #define NAND_VERSION_MAJOR_SHIFT 28
  120. #define NAND_VERSION_MINOR_MASK 0x0fff0000
  121. #define NAND_VERSION_MINOR_SHIFT 16
  122. /* NAND OP_CMDs */
  123. #define PAGE_READ 0x2
  124. #define PAGE_READ_WITH_ECC 0x3
  125. #define PAGE_READ_WITH_ECC_SPARE 0x4
  126. #define PROGRAM_PAGE 0x6
  127. #define PAGE_PROGRAM_WITH_ECC 0x7
  128. #define PROGRAM_PAGE_SPARE 0x9
  129. #define BLOCK_ERASE 0xa
  130. #define FETCH_ID 0xb
  131. #define RESET_DEVICE 0xd
  132. /*
  133. * the NAND controller performs reads/writes with ECC in 516 byte chunks.
  134. * the driver calls the chunks 'step' or 'codeword' interchangeably
  135. */
  136. #define NANDC_STEP_SIZE 512
  137. /*
  138. * the largest page size we support is 8K, this will have 16 steps/codewords
  139. * of 512 bytes each
  140. */
  141. #define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
  142. /* we read at most 3 registers per codeword scan */
  143. #define MAX_REG_RD (3 * MAX_NUM_STEPS)
  144. /* ECC modes supported by the controller */
  145. #define ECC_NONE BIT(0)
  146. #define ECC_RS_4BIT BIT(1)
  147. #define ECC_BCH_4BIT BIT(2)
  148. #define ECC_BCH_8BIT BIT(3)
/*
 * bookkeeping for one DMA transfer queued on the controller channel
 *
 * @node:	list node for queueing on qcom_nand_controller->desc_list
 * @dir:	DMA mapping direction, needed later by dma_unmap_sg()
 * @sgl:	single-entry scatterlist describing the transfer buffer
 * @dma_desc:	the prepared dmaengine transaction descriptor
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	struct scatterlist sgl;
	struct dma_async_tx_descriptor *dma_desc;
};
/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 *
 * field order is load-bearing: write_reg_dma() DMAs a run of consecutive
 * fields starting at the one matching its first register offset (see
 * offset_to_nandc_reg()), so adjacent fields must mirror register groups
 * that are written together.
 */
struct nandc_regs {
	__le32 cmd;		/* NAND_FLASH_CMD */
	__le32 addr0;		/* NAND_ADDR0 */
	__le32 addr1;		/* NAND_ADDR1 */
	__le32 chip_sel;	/* NAND_FLASH_CHIP_SELECT */
	__le32 exec;		/* NAND_EXEC_CMD */

	__le32 cfg0;		/* NAND_DEV0_CFG0 */
	__le32 cfg1;		/* NAND_DEV0_CFG1 */
	__le32 ecc_bch_cfg;	/* NAND_DEV0_ECC_CFG */

	__le32 clrflashstatus;	/* staged value written to NAND_FLASH_STATUS */
	__le32 clrreadstatus;	/* staged value written to NAND_READ_STATUS */

	__le32 cmd1;		/* NAND_DEV_CMD1 */
	__le32 vld;		/* NAND_DEV_CMD_VLD */

	/* saved copies, selected via the dummy *_RESTORE offsets */
	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;	/* NAND_EBI2_ECC_BUF_CFG */
};
/*
 * NAND controller data struct
 *
 * @controller:		base controller structure
 * @host_list:		list containing all the chips attached to the
 *			controller
 * @dev:		parent device
 * @base:		MMIO base
 * @base_dma:		physical base address of controller registers
 * @core_clk:		controller clock
 * @aon_clk:		another controller clock
 *
 * @chan:		dma channel; used for both register and data
 *			transfers (see prep_dma_desc())
 * @cmd_crci:		ADM DMA CRCI for command flow control
 * @data_crci:		ADM DMA CRCI for data flow control
 * @desc_list:		DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:	our local DMA buffer for page read/writes,
 *			used when we can't use the buffer provided
 *			by upper layers directly
 * @buf_size/count/start: markers for chip->read_buf/write_buf functions
 * @reg_read_buf:	local buffer for reading back registers via DMA,
 *			holds up to MAX_REG_RD __le32 values
 * @reg_read_pos:	marker for data read in reg_read_buf
 *
 * @regs:		a contiguous chunk of memory for DMA register
 *			writes. contains the register values to be
 *			written to controller
 * @cmd1/vld:		some fixed controller register values, saved at
 *			probe time and restored after ONFI param reads
 * @ecc_modes:		supported ECC modes by the current controller,
 *			initialized via DT match data
 */
struct qcom_nand_controller {
	struct nand_hw_control controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	struct dma_chan *chan;
	unsigned int cmd_crci;
	unsigned int data_crci;
	struct list_head desc_list;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;

	__le32 *reg_read_buf;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	u32 ecc_modes;
};
/*
 * NAND chip structure
 *
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @spare_bytes:		spare bytes per codeword (assumption from the
 *				name; setup code is not visible in this chunk
 *				— TODO confirm)
 * @bbm_size:			presumably the bad block marker size in bytes
 *				— verify against the ECC setup code
 * @status:			value to be returned if NAND_CMD_STATUS command
 *				is executed
 * @last_command:		keeps track of last command on this chip. used
 *				for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
  276. static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
  277. {
  278. return container_of(chip, struct qcom_nand_host, chip);
  279. }
  280. static inline struct qcom_nand_controller *
  281. get_qcom_nand_controller(struct nand_chip *chip)
  282. {
  283. return container_of(chip->controller, struct qcom_nand_controller,
  284. controller);
  285. }
  286. static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
  287. {
  288. return ioread32(nandc->base + offset);
  289. }
  290. static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
  291. u32 val)
  292. {
  293. iowrite32(val, nandc->base + offset);
  294. }
/*
 * map a controller register offset to its shadow field in struct nandc_regs.
 *
 * the dummy NAND_DEV_CMD1_RESTORE/NAND_DEV_CMD_VLD_RESTORE offsets select the
 * saved copies (orig_cmd1/orig_vld), so the same hardware register can be
 * staged with two different values (probe value and restore value).
 *
 * returns NULL for offsets that have no shadow field.
 */
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		/* holds the value used to clear the status register */
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	default:
		return NULL;
	}
}
  332. static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
  333. u32 val)
  334. {
  335. struct nandc_regs *regs = nandc->regs;
  336. __le32 *reg;
  337. reg = offset_to_nandc_reg(regs, offset);
  338. if (reg)
  339. *reg = cpu_to_le32(val);
  340. }
  341. /* helper to configure address register values */
  342. static void set_address(struct qcom_nand_host *host, u16 column, int page)
  343. {
  344. struct nand_chip *chip = &host->chip;
  345. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  346. if (chip->options & NAND_BUSWIDTH_16)
  347. column >>= 1;
  348. nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
  349. nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
  350. }
/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	/*
	 * only the read op-code depends on host->use_ecc; for writes the
	 * ECC choice is expressed purely through the cfg registers below
	 */
	if (read) {
		if (host->use_ecc)
			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		/* patch the requested codeword count into the ECC config */
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		/* raw config with ECC engine disabled */
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
}
  391. static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
  392. int reg_off, const void *vaddr, int size,
  393. bool flow_control)
  394. {
  395. struct desc_info *desc;
  396. struct dma_async_tx_descriptor *dma_desc;
  397. struct scatterlist *sgl;
  398. struct dma_slave_config slave_conf;
  399. enum dma_transfer_direction dir_eng;
  400. int ret;
  401. desc = kzalloc(sizeof(*desc), GFP_KERNEL);
  402. if (!desc)
  403. return -ENOMEM;
  404. sgl = &desc->sgl;
  405. sg_init_one(sgl, vaddr, size);
  406. if (read) {
  407. dir_eng = DMA_DEV_TO_MEM;
  408. desc->dir = DMA_FROM_DEVICE;
  409. } else {
  410. dir_eng = DMA_MEM_TO_DEV;
  411. desc->dir = DMA_TO_DEVICE;
  412. }
  413. ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
  414. if (ret == 0) {
  415. ret = -ENOMEM;
  416. goto err;
  417. }
  418. memset(&slave_conf, 0x00, sizeof(slave_conf));
  419. slave_conf.device_fc = flow_control;
  420. if (read) {
  421. slave_conf.src_maxburst = 16;
  422. slave_conf.src_addr = nandc->base_dma + reg_off;
  423. slave_conf.slave_id = nandc->data_crci;
  424. } else {
  425. slave_conf.dst_maxburst = 16;
  426. slave_conf.dst_addr = nandc->base_dma + reg_off;
  427. slave_conf.slave_id = nandc->cmd_crci;
  428. }
  429. ret = dmaengine_slave_config(nandc->chan, &slave_conf);
  430. if (ret) {
  431. dev_err(nandc->dev, "failed to configure dma channel\n");
  432. goto err;
  433. }
  434. dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
  435. if (!dma_desc) {
  436. dev_err(nandc->dev, "failed to prepare desc\n");
  437. ret = -EINVAL;
  438. goto err;
  439. }
  440. desc->dma_desc = dma_desc;
  441. list_add_tail(&desc->node, &nandc->desc_list);
  442. return 0;
  443. err:
  444. kfree(desc);
  445. return ret;
  446. }
  447. /*
  448. * read_reg_dma: prepares a descriptor to read a given number of
  449. * contiguous registers to the reg_read_buf pointer
  450. *
  451. * @first: offset of the first register in the contiguous block
  452. * @num_regs: number of registers to read
  453. */
  454. static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
  455. int num_regs)
  456. {
  457. bool flow_control = false;
  458. void *vaddr;
  459. int size;
  460. if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
  461. flow_control = true;
  462. size = num_regs * sizeof(u32);
  463. vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
  464. nandc->reg_read_pos += num_regs;
  465. return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
  466. }
/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;
	int size;

	/* source data comes from the pre-staged register shadow */
	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	/*
	 * the dummy *_RESTORE offsets selected the saved copies (orig_cmd1,
	 * orig_vld) as the DMA source above, but the DMA destination must
	 * be the real register offset
	 */
	if (first == NAND_DEV_CMD1_RESTORE)
		first = NAND_DEV_CMD1;

	if (first == NAND_DEV_CMD_VLD_RESTORE)
		first = NAND_DEV_CMD_VLD;

	size = num_regs * sizeof(u32);

	return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
}
/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 *
 * data buffer transfers use no CRCI flow control (last argument false)
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size)
{
	return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 *
 * data buffer transfers use no CRCI flow control (last argument false)
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size)
{
	return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
/*
 * helper to prepare dma descriptors to configure registers needed for reading a
 * codeword/step in a page
 */
static void config_cw_read(struct qcom_nand_controller *nandc)
{
	/* NAND_FLASH_CMD, NAND_ADDR0, NAND_ADDR1 */
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	/* NAND_DEV0_CFG0, NAND_DEV0_CFG1, NAND_DEV0_ECC_CFG */
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);

	/* kick off the operation */
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	/* read back NAND_FLASH_STATUS + NAND_BUFFER_STATUS, then the
	 * erased-codeword detection status */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
}
/*
 * helpers to prepare dma descriptors used to configure registers needed for
 * writing a codeword/step in a page
 */
static void config_cw_write_pre(struct qcom_nand_controller *nandc)
{
	/* NAND_FLASH_CMD, NAND_ADDR0, NAND_ADDR1 */
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	/* NAND_DEV0_CFG0, NAND_DEV0_CFG1, NAND_DEV0_ECC_CFG */
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
}
/* kick off the staged write, read back the result and clear status */
static void config_cw_write_post(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	/* write the staged clrflashstatus/clrreadstatus values */
	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
	write_reg_dma(nandc, NAND_READ_STATUS, 1);
}
/*
 * the following functions are used within chip->cmdfunc() to perform different
 * NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);

	/* one 512-byte codeword, no spare bytes, ECC disabled */
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~(1 << READ_START_VLD))
		      | 0 << READ_START_VLD);
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* stage the original CMD1/VLD values for the restore writes below */
	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);

	/* flush the probing values into the hardware registers */
	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);

	return 0;
}
/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* raw (non-ECC) config with the codeword count cleared */
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	/* flush CMD/ADDR0/ADDR1 and CFG0/CFG1, then kick EXEC */
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	/* read back the result, then clear the status registers */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
	write_reg_dma(nandc, NAND_READ_STATUS, 1);

	return 0;
}
/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* nothing to stage when no address was supplied */
	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* flush CMD/ADDR0/ADDR1/CHIP_SELECT, kick EXEC, read the ID back */
	write_reg_dma(nandc, NAND_FLASH_CMD, 4);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_READ_ID, 1);

	return 0;
}
  638. /* sets up descriptors for NAND_CMD_RESET */
  639. static int reset(struct qcom_nand_host *host)
  640. {
  641. struct nand_chip *chip = &host->chip;
  642. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  643. nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
  644. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  645. write_reg_dma(nandc, NAND_FLASH_CMD, 1);
  646. write_reg_dma(nandc, NAND_EXEC_CMD, 1);
  647. read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
  648. return 0;
  649. }
  650. /* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;

	/* submit every queued descriptor; the last cookie covers the chain */
	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	/* synchronously wait for the final descriptor to complete */
	if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
		return -ETIMEDOUT;

	return 0;
}
  661. static void free_descs(struct qcom_nand_controller *nandc)
  662. {
  663. struct desc_info *desc, *n;
  664. list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
  665. list_del(&desc->node);
  666. dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
  667. kfree(desc);
  668. }
  669. }
  670. /* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	/* wipe the entire readback area, not just the portion last used */
	memset(nandc->reg_read_buf, 0,
	       MAX_REG_RD * sizeof(*nandc->reg_read_buf));
}
/* per-command state reset, run before any NAND command is handled */
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* reset the staging buffer cursors used by read_byte()/read_buf() */
	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);
}
/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	/* PAGEPROG reads back one status word per codeword, ERASE just one */
	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		/* an MPU error indicates the device is write-protected */
		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		/* DEVICE_STS_ERR is only checked on the last codeword */
		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
/* per-command fixups, run after the command's descriptors have completed */
static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		/* stage the ID bytes for subsequent read_byte()/read_buf() */
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		/* fold the controller status into our private status byte */
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}
/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;	/* true for commands that queue descriptors here */
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		/* 4 ID bytes will be staged for the following read_byte()s */
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		/* only program registers; ecc->read_page() does the transfer */
		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		/* drop any descriptors queued before the failure */
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
  794. /*
  795. * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
  796. * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
  797. *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
  799. * but it notifies that it is an erased CW by placing special characters at
  800. * certain offsets in the buffer.
  801. *
  802. * verify if the page is erased or not, and fix up the page for RS ECC by
  803. * replacing the special characters with 0xff.
  804. */
  805. static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
  806. {
  807. u8 empty1, empty2;
  808. /*
  809. * an erased page flags an error in NAND_FLASH_STATUS, check if the page
  810. * is erased by looking for 0x54s at offsets 3 and 175 from the
  811. * beginning of each codeword
  812. */
  813. empty1 = data_buf[3];
  814. empty2 = data_buf[175];
  815. /*
  816. * if the erased codework markers, if they exist override them with
  817. * 0xffs
  818. */
  819. if ((empty1 == 0x54 && empty2 == 0xff) ||
  820. (empty1 == 0xff && empty2 == 0x54)) {
  821. data_buf[3] = 0xff;
  822. data_buf[175] = 0xff;
  823. }
  824. /*
  825. * check if the entire chunk contains 0xffs or not. if it doesn't, then
  826. * restore the original values at the special offsets
  827. */
  828. if (memchr_inv(data_buf, 0xff, data_len)) {
  829. data_buf[3] = empty1;
  830. data_buf[175] = empty2;
  831. return false;
  832. }
  833. return true;
  834. }
/*
 * per-codeword status words copied back from the controller after a page
 * read; parse_read_errors() walks one triplet per codeword
 */
struct read_stats {
	__le32 flash;		/* flash status, FS_* bits */
	__le32 buffer;		/* buffer status, BS_* bits */
	__le32 erased_cw;	/* erased-codeword detection status */
};
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 *
 * returns the maximum number of bitflips seen in any codeword and updates
 * mtd->ecc_stats along the way. data_buf/oob_buf may be NULL when the
 * corresponding part of the page wasn't read.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	/* the controller recorded one read_stats triplet per codeword */
	buf = (struct read_stats *)nandc->reg_read_buf;

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* the last codeword has less data plus the free oob bytes */
		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* ignore erased codeword errors */
			if (host->bch_enabled) {
				/* BCH: the HW reports erased CWs directly */
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				/* RS: detect via in-band marker bytes */
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * make sure it isn't an erased page reported
				 * as not-erased by HW because of a few bitflips
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			/* no operation error: account the corrected bitflips */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 *
 * either of data_buf/oob_buf may be NULL when the caller doesn't want that
 * part of the page; one read is queued per codeword, then all are submitted.
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			/* last codeword: less data, plus the free oob area */
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	/* ECC reads transfer only the data part, raw reads the full codeword */
	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	/* address the beginning of the last codeword */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
  993. /* implements ecc->read_page() */
  994. static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
  995. uint8_t *buf, int oob_required, int page)
  996. {
  997. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  998. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  999. u8 *data_buf, *oob_buf = NULL;
  1000. int ret;
  1001. data_buf = buf;
  1002. oob_buf = oob_required ? chip->oob_poi : NULL;
  1003. ret = read_page_ecc(host, data_buf, oob_buf);
  1004. if (ret) {
  1005. dev_err(nandc->dev, "failure to read page\n");
  1006. return ret;
  1007. }
  1008. return parse_read_errors(host, data_buf, oob_buf);
  1009. }
  1010. /* implements ecc->read_page_raw() */
  1011. static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
  1012. struct nand_chip *chip, uint8_t *buf,
  1013. int oob_required, int page)
  1014. {
  1015. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1016. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1017. u8 *data_buf, *oob_buf;
  1018. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1019. int i, ret;
  1020. data_buf = buf;
  1021. oob_buf = chip->oob_poi;
  1022. host->use_ecc = false;
  1023. update_rw_regs(host, ecc->steps, true);
  1024. for (i = 0; i < ecc->steps; i++) {
  1025. int data_size1, data_size2, oob_size1, oob_size2;
  1026. int reg_off = FLASH_BUF_ACC;
  1027. data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
  1028. oob_size1 = host->bbm_size;
  1029. if (i == (ecc->steps - 1)) {
  1030. data_size2 = ecc->size - data_size1 -
  1031. ((ecc->steps - 1) << 2);
  1032. oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
  1033. host->spare_bytes;
  1034. } else {
  1035. data_size2 = host->cw_data - data_size1;
  1036. oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
  1037. }
  1038. config_cw_read(nandc);
  1039. read_data_dma(nandc, reg_off, data_buf, data_size1);
  1040. reg_off += data_size1;
  1041. data_buf += data_size1;
  1042. read_data_dma(nandc, reg_off, oob_buf, oob_size1);
  1043. reg_off += oob_size1;
  1044. oob_buf += oob_size1;
  1045. read_data_dma(nandc, reg_off, data_buf, data_size2);
  1046. reg_off += data_size2;
  1047. data_buf += data_size2;
  1048. read_data_dma(nandc, reg_off, oob_buf, oob_size2);
  1049. oob_buf += oob_size2;
  1050. }
  1051. ret = submit_descs(nandc);
  1052. if (ret)
  1053. dev_err(nandc->dev, "failure to read raw page\n");
  1054. free_descs(nandc);
  1055. return 0;
  1056. }
/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	clear_read_regs(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	/* a page read with a NULL data buffer only transfers the oob */
	ret = read_page_ecc(host, NULL, chip->oob_poi);
	if (ret)
		dev_err(nandc->dev, "failure to read oob\n");

	return ret;
}
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);

	/* queue one write per codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			/* last codeword: less data, plus the free oob area */
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size);
		}

		config_cw_write_post(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	return ret;
}
/*
 * implements ecc->write_page_raw()
 *
 * mirrors the raw read layout: two data chunks with the BBM byte(s) in
 * between, then the oob bytes, for each codeword
 */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, reg_off, data_buf, data_size1);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2);
		oob_buf += oob_size2;

		config_cw_write_post(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	return ret;
}
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only oob within a codeword,
 * since ecc is calculated for the combined codeword. we first copy the
 * entire contents for the last codeword(data + oob), replace the old oob
 * with the new one in chip->oob_poi, and then write the entire codeword.
 * this read-copy-write operation results in a slight performance loss.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret, status = 0;

	host->use_ecc = true;

	/* read the last codeword (data + old oob) into nandc->data_buffer */
	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		       data_size + oob_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	/* commit the program operation and report the device status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/* implements chip->block_bad: checks the BBM in the last codeword of a page */
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;
	u32 flash_status;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contains the BBM
	 */
	host->use_ecc = false;

	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);

	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	/* the BBM sits right after the last codeword's data bytes */
	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	/* on a 16-bit bus the marker occupies two bytes */
	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	/*
	 * NOTE(review): on a read failure we fall through with bad == 0,
	 * i.e. the block is reported as good — confirm this is intentional
	 */
	return bad;
}
/* implements chip->block_markbad: zeroes the last codeword of the block */
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, status = 0;

	clear_read_regs(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	/* commit the program operation and report the device status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
  1279. /*
  1280. * the three functions below implement chip->read_byte(), chip->read_buf()
  1281. * and chip->write_buf() respectively. these aren't used for
  1282. * reading/writing page data, they are used for smaller data like reading
  1283. * id, status etc
  1284. */
  1285. static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
  1286. {
  1287. struct nand_chip *chip = mtd_to_nand(mtd);
  1288. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1289. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1290. u8 *buf = nandc->data_buffer;
  1291. u8 ret = 0x0;
  1292. if (host->last_command == NAND_CMD_STATUS) {
  1293. ret = host->status;
  1294. host->status = NAND_STATUS_READY | NAND_STATUS_WP;
  1295. return ret;
  1296. }
  1297. if (nandc->buf_start < nandc->buf_count)
  1298. ret = buf[nandc->buf_start++];
  1299. return ret;
  1300. }
  1301. static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  1302. {
  1303. struct nand_chip *chip = mtd_to_nand(mtd);
  1304. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1305. int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
  1306. memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
  1307. nandc->buf_start += real_len;
  1308. }
  1309. static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
  1310. int len)
  1311. {
  1312. struct nand_chip *chip = mtd_to_nand(mtd);
  1313. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1314. int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
  1315. memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
  1316. nandc->buf_start += real_len;
  1317. }
  1318. /* we support only one external chip for now */
  1319. static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
  1320. {
  1321. struct nand_chip *chip = mtd_to_nand(mtd);
  1322. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1323. if (chipnr <= 0)
  1324. return;
  1325. dev_warn(nandc->dev, "invalid chip select\n");
  1326. }
  1327. /*
  1328. * NAND controller page layout info
  1329. *
  1330. * Layout with ECC enabled:
  1331. *
  1332. * |----------------------| |---------------------------------|
  1333. * | xx.......yy| | *********xx.......yy|
  1334. * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
  1335. * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
  1336. * | xx.......yy| | *********xx.......yy|
  1337. * |----------------------| |---------------------------------|
  1338. * codeword 1,2..n-1 codeword n
  1339. * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
  1340. *
  1341. * n = Number of codewords in the page
  1342. * . = ECC bytes
  1343. * * = Spare/free bytes
  1344. * x = Unused byte(s)
  1345. * y = Reserved byte(s)
  1346. *
  1347. * 2K page: n = 4, spare = 16 bytes
  1348. * 4K page: n = 8, spare = 32 bytes
  1349. * 8K page: n = 16, spare = 64 bytes
  1350. *
  1351. * the qcom nand controller operates at a sub page/codeword level. each
  1352. * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
  1353. * the number of ECC bytes vary based on the ECC strength and the bus width.
  1354. *
  1355. * the first n - 1 codewords contains 516 bytes of user data, the remaining
  1356. * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
  1357. * both user data and spare(oobavail) bytes that sum up to 516 bytes.
  1358. *
  1359. * When we access a page with ECC enabled, the reserved bytes(s) are not
  1360. * accessible at all. When reading, we fill up these unreadable positions
  1361. * with 0xffs. When writing, the controller skips writing the inaccessible
  1362. * bytes.
  1363. *
  1364. * Layout with ECC disabled:
  1365. *
  1366. * |------------------------------| |---------------------------------------|
  1367. * | yy xx.......| | bb *********xx.......|
  1368. * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
  1369. * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
  1370. * | yy xx.......| | bb *********xx.......|
  1371. * |------------------------------| |---------------------------------------|
  1372. * codeword 1,2..n-1 codeword n
  1373. * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
  1374. *
  1375. * n = Number of codewords in the page
  1376. * . = ECC bytes
  1377. * * = Spare/free bytes
  1378. * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
  1380. * b = Real Bad Block byte(s)
  1381. * size1/size2 = function of codeword size and 'n'
  1382. *
  1383. * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
  1384. * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
  1385. * Block Markers. In the last codeword, this position contains the real BBM
  1386. *
  1387. * In order to have a consistent layout between RAW and ECC modes, we assume
  1388. * the following OOB layout arrangement:
  1389. *
  1390. * |-----------| |--------------------|
  1391. * |yyxx.......| |bb*********xx.......|
  1392. * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
  1393. * |yyxx.......| |bb*********xx.......|
  1394. * |yyxx.......| |bb*********xx.......|
  1395. * |-----------| |--------------------|
  1396. * first n - 1 nth OOB region
  1397. * OOB regions
  1398. *
  1399. * n = Number of codewords in the page
  1400. * . = ECC bytes
  1401. * * = FREE OOB bytes
  1402. * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
  1403. * x = Unused byte(s)
  1404. * b = Real bad block byte(s) (inaccessible when ECC enabled)
  1405. *
  1406. * This layout is read as is when ECC is disabled. When ECC is enabled, the
  1407. * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
  1408. * and assumed as 0xffs when we read a page/oob. The ECC, unused and
  1409. * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
  1410. * the sum of the three).
  1411. */
  1412. static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
  1413. struct mtd_oob_region *oobregion)
  1414. {
  1415. struct nand_chip *chip = mtd_to_nand(mtd);
  1416. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1417. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1418. if (section > 1)
  1419. return -ERANGE;
  1420. if (!section) {
  1421. oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
  1422. host->bbm_size;
  1423. oobregion->offset = 0;
  1424. } else {
  1425. oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
  1426. oobregion->offset = mtd->oobsize - oobregion->length;
  1427. }
  1428. return 0;
  1429. }
  1430. static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
  1431. struct mtd_oob_region *oobregion)
  1432. {
  1433. struct nand_chip *chip = mtd_to_nand(mtd);
  1434. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1435. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1436. if (section)
  1437. return -ERANGE;
  1438. oobregion->length = ecc->steps * 4;
  1439. oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
  1440. return 0;
  1441. }
/* OOB layout callbacks handed to the MTD core via mtd_set_ooblayout() */
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};
  1446. static int qcom_nand_host_setup(struct qcom_nand_host *host)
  1447. {
  1448. struct nand_chip *chip = &host->chip;
  1449. struct mtd_info *mtd = nand_to_mtd(chip);
  1450. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1451. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1452. int cwperpage, bad_block_byte;
  1453. bool wide_bus;
  1454. int ecc_mode = 1;
  1455. /*
  1456. * the controller requires each step consists of 512 bytes of data.
  1457. * bail out if DT has populated a wrong step size.
  1458. */
  1459. if (ecc->size != NANDC_STEP_SIZE) {
  1460. dev_err(nandc->dev, "invalid ecc size\n");
  1461. return -EINVAL;
  1462. }
  1463. wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
  1464. if (ecc->strength >= 8) {
  1465. /* 8 bit ECC defaults to BCH ECC on all platforms */
  1466. host->bch_enabled = true;
  1467. ecc_mode = 1;
  1468. if (wide_bus) {
  1469. host->ecc_bytes_hw = 14;
  1470. host->spare_bytes = 0;
  1471. host->bbm_size = 2;
  1472. } else {
  1473. host->ecc_bytes_hw = 13;
  1474. host->spare_bytes = 2;
  1475. host->bbm_size = 1;
  1476. }
  1477. } else {
  1478. /*
  1479. * if the controller supports BCH for 4 bit ECC, the controller
  1480. * uses lesser bytes for ECC. If RS is used, the ECC bytes is
  1481. * always 10 bytes
  1482. */
  1483. if (nandc->ecc_modes & ECC_BCH_4BIT) {
  1484. /* BCH */
  1485. host->bch_enabled = true;
  1486. ecc_mode = 0;
  1487. if (wide_bus) {
  1488. host->ecc_bytes_hw = 8;
  1489. host->spare_bytes = 2;
  1490. host->bbm_size = 2;
  1491. } else {
  1492. host->ecc_bytes_hw = 7;
  1493. host->spare_bytes = 4;
  1494. host->bbm_size = 1;
  1495. }
  1496. } else {
  1497. /* RS */
  1498. host->ecc_bytes_hw = 10;
  1499. if (wide_bus) {
  1500. host->spare_bytes = 0;
  1501. host->bbm_size = 2;
  1502. } else {
  1503. host->spare_bytes = 1;
  1504. host->bbm_size = 1;
  1505. }
  1506. }
  1507. }
  1508. /*
  1509. * we consider ecc->bytes as the sum of all the non-data content in a
  1510. * step. It gives us a clean representation of the oob area (even if
  1511. * all the bytes aren't used for ECC).It is always 16 bytes for 8 bit
  1512. * ECC and 12 bytes for 4 bit ECC
  1513. */
  1514. ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
  1515. ecc->read_page = qcom_nandc_read_page;
  1516. ecc->read_page_raw = qcom_nandc_read_page_raw;
  1517. ecc->read_oob = qcom_nandc_read_oob;
  1518. ecc->write_page = qcom_nandc_write_page;
  1519. ecc->write_page_raw = qcom_nandc_write_page_raw;
  1520. ecc->write_oob = qcom_nandc_write_oob;
  1521. ecc->mode = NAND_ECC_HW;
  1522. mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
  1523. cwperpage = mtd->writesize / ecc->size;
  1524. /*
  1525. * DATA_UD_BYTES varies based on whether the read/write command protects
  1526. * spare data with ECC too. We protect spare data by default, so we set
  1527. * it to main + spare data, which are 512 and 4 bytes respectively.
  1528. */
  1529. host->cw_data = 516;
  1530. /*
  1531. * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
  1532. * for 8 bit ECC
  1533. */
  1534. host->cw_size = host->cw_data + ecc->bytes;
  1535. if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
  1536. dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
  1537. return -EINVAL;
  1538. }
  1539. bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
  1540. host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
  1541. | host->cw_data << UD_SIZE_BYTES
  1542. | 0 << DISABLE_STATUS_AFTER_WRITE
  1543. | 5 << NUM_ADDR_CYCLES
  1544. | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
  1545. | 0 << STATUS_BFR_READ
  1546. | 1 << SET_RD_MODE_AFTER_STATUS
  1547. | host->spare_bytes << SPARE_SIZE_BYTES;
  1548. host->cfg1 = 7 << NAND_RECOVERY_CYCLES
  1549. | 0 << CS_ACTIVE_BSY
  1550. | bad_block_byte << BAD_BLOCK_BYTE_NUM
  1551. | 0 << BAD_BLOCK_IN_SPARE_AREA
  1552. | 2 << WR_RD_BSY_GAP
  1553. | wide_bus << WIDE_FLASH
  1554. | host->bch_enabled << ENABLE_BCH_ECC;
  1555. host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
  1556. | host->cw_size << UD_SIZE_BYTES
  1557. | 5 << NUM_ADDR_CYCLES
  1558. | 0 << SPARE_SIZE_BYTES;
  1559. host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
  1560. | 0 << CS_ACTIVE_BSY
  1561. | 17 << BAD_BLOCK_BYTE_NUM
  1562. | 1 << BAD_BLOCK_IN_SPARE_AREA
  1563. | 2 << WR_RD_BSY_GAP
  1564. | wide_bus << WIDE_FLASH
  1565. | 1 << DEV0_CFG1_ECC_DISABLE;
  1566. host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
  1567. | 0 << ECC_SW_RESET
  1568. | host->cw_data << ECC_NUM_DATA_BYTES
  1569. | 1 << ECC_FORCE_CLK_OPEN
  1570. | ecc_mode << ECC_MODE
  1571. | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
  1572. host->ecc_buf_cfg = 0x203 << NUM_STEPS;
  1573. host->clrflashstatus = FS_READY_BSY_N;
  1574. host->clrreadstatus = 0xc0;
  1575. dev_dbg(nandc->dev,
  1576. "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
  1577. host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
  1578. host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
  1579. cwperpage);
  1580. return 0;
  1581. }
  1582. static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
  1583. {
  1584. int ret;
  1585. ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
  1586. if (ret) {
  1587. dev_err(nandc->dev, "failed to set DMA mask\n");
  1588. return ret;
  1589. }
  1590. /*
  1591. * we use the internal buffer for reading ONFI params, reading small
  1592. * data like ID and status, and preforming read-copy-write operations
  1593. * when writing to a codeword partially. 532 is the maximum possible
  1594. * size of a codeword for our nand controller
  1595. */
  1596. nandc->buf_size = 532;
  1597. nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
  1598. GFP_KERNEL);
  1599. if (!nandc->data_buffer)
  1600. return -ENOMEM;
  1601. nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
  1602. GFP_KERNEL);
  1603. if (!nandc->regs)
  1604. return -ENOMEM;
  1605. nandc->reg_read_buf = devm_kzalloc(nandc->dev,
  1606. MAX_REG_RD * sizeof(*nandc->reg_read_buf),
  1607. GFP_KERNEL);
  1608. if (!nandc->reg_read_buf)
  1609. return -ENOMEM;
  1610. nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
  1611. if (!nandc->chan) {
  1612. dev_err(nandc->dev, "failed to request slave channel\n");
  1613. return -ENODEV;
  1614. }
  1615. INIT_LIST_HEAD(&nandc->desc_list);
  1616. INIT_LIST_HEAD(&nandc->host_list);
  1617. nand_hw_control_init(&nandc->controller);
  1618. return 0;
  1619. }
/*
 * Release the only resource of qcom_nandc_alloc() that is not
 * devm-managed: the DMA channel.
 */
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	dma_release_channel(nandc->chan);
}
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);

	/* enable ADM DMA */
	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);

	/*
	 * save the original values of these registers; presumably so they
	 * can be restored/combined later by command handling code — the
	 * restore path is not visible in this function
	 */
	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);

	return 0;
}
  1636. static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
  1637. struct qcom_nand_host *host,
  1638. struct device_node *dn)
  1639. {
  1640. struct nand_chip *chip = &host->chip;
  1641. struct mtd_info *mtd = nand_to_mtd(chip);
  1642. struct device *dev = nandc->dev;
  1643. int ret;
  1644. ret = of_property_read_u32(dn, "reg", &host->cs);
  1645. if (ret) {
  1646. dev_err(dev, "can't get chip-select\n");
  1647. return -ENXIO;
  1648. }
  1649. nand_set_flash_node(chip, dn);
  1650. mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
  1651. mtd->owner = THIS_MODULE;
  1652. mtd->dev.parent = dev;
  1653. chip->cmdfunc = qcom_nandc_command;
  1654. chip->select_chip = qcom_nandc_select_chip;
  1655. chip->read_byte = qcom_nandc_read_byte;
  1656. chip->read_buf = qcom_nandc_read_buf;
  1657. chip->write_buf = qcom_nandc_write_buf;
  1658. /*
  1659. * the bad block marker is readable only when we read the last codeword
  1660. * of a page with ECC disabled. currently, the nand_base and nand_bbt
  1661. * helpers don't allow us to read BB from a nand chip with ECC
  1662. * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
  1663. * and block_markbad helpers until we permanently switch to using
  1664. * MTD_OPS_RAW for all drivers (with the help of badblockbits)
  1665. */
  1666. chip->block_bad = qcom_nandc_block_bad;
  1667. chip->block_markbad = qcom_nandc_block_markbad;
  1668. chip->controller = &nandc->controller;
  1669. chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
  1670. NAND_SKIP_BBTSCAN;
  1671. /* set up initial status value */
  1672. host->status = NAND_STATUS_READY | NAND_STATUS_WP;
  1673. ret = nand_scan_ident(mtd, 1, NULL);
  1674. if (ret)
  1675. return ret;
  1676. ret = qcom_nand_host_setup(host);
  1677. if (ret)
  1678. return ret;
  1679. ret = nand_scan_tail(mtd);
  1680. if (ret)
  1681. return ret;
  1682. return mtd_device_register(mtd, NULL, 0);
  1683. }
  1684. /* parse custom DT properties here */
  1685. static int qcom_nandc_parse_dt(struct platform_device *pdev)
  1686. {
  1687. struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
  1688. struct device_node *np = nandc->dev->of_node;
  1689. int ret;
  1690. ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
  1691. if (ret) {
  1692. dev_err(nandc->dev, "command CRCI unspecified\n");
  1693. return ret;
  1694. }
  1695. ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
  1696. if (ret) {
  1697. dev_err(nandc->dev, "data CRCI unspecified\n");
  1698. return ret;
  1699. }
  1700. return 0;
  1701. }
/*
 * Probe: allocate the controller state, map registers, grab clocks,
 * perform one-time hardware setup and initialize every "qcom,nandcs"
 * child node as a NAND host. The goto ladder unwinds in the exact
 * reverse order of acquisition — do not reorder.
 */
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	struct qcom_nand_host *host;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	/* match data encodes the supported ECC modes (see of_match table) */
	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->ecc_modes = (unsigned long)dev_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	/* DMA address of the register space, used by the DMA descriptors */
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "qcom,nandcs")) {
			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				/* drop the ref the iterator took on child */
				of_node_put(child);
				ret = -ENOMEM;
				goto err_cs_init;
			}

			/* a failing host is skipped, not fatal for probe */
			ret = qcom_nand_host_init(nandc, host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue;
			}

			list_add_tail(&host->node, &nandc->host_list);
		}
	}

	/* at least one chip-select must have initialized successfully */
	if (list_empty(&nandc->host_list)) {
		ret = -ENODEV;
		goto err_cs_init;
	}

	return 0;

err_cs_init:
	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));
err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
/*
 * Remove: tear down in reverse probe order — unregister every host,
 * release the DMA channel, then gate the clocks.
 */
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}
/* ECC modes supported by the EBI2 NAND controller variant */
#define EBI2_NANDC_ECC_MODES	(ECC_RS_4BIT | ECC_BCH_8BIT)

/*
 * data will hold a struct pointer containing more differences once we support
 * more controller variants
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{	.compatible = "qcom,ipq806x-nand",
		/* cast to void *: only the ECC-mode bitmask for now */
		.data = (void *)EBI2_NANDC_ECC_MODES,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
/* platform driver glue; probe/remove implement the full life cycle */
static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove  = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");