qcom_nandc.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223
  1. /*
  2. * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  3. *
  4. * This software is licensed under the terms of the GNU General Public
  5. * License version 2, as published by the Free Software Foundation, and
  6. * may be copied, distributed, and modified under those terms.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/slab.h>
  15. #include <linux/bitops.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/dmaengine.h>
  18. #include <linux/module.h>
  19. #include <linux/mtd/nand.h>
  20. #include <linux/mtd/partitions.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/of_mtd.h>
  24. #include <linux/delay.h>
  25. /* NANDc reg offsets */
  26. #define NAND_FLASH_CMD 0x00
  27. #define NAND_ADDR0 0x04
  28. #define NAND_ADDR1 0x08
  29. #define NAND_FLASH_CHIP_SELECT 0x0c
  30. #define NAND_EXEC_CMD 0x10
  31. #define NAND_FLASH_STATUS 0x14
  32. #define NAND_BUFFER_STATUS 0x18
  33. #define NAND_DEV0_CFG0 0x20
  34. #define NAND_DEV0_CFG1 0x24
  35. #define NAND_DEV0_ECC_CFG 0x28
  36. #define NAND_DEV1_ECC_CFG 0x2c
  37. #define NAND_DEV1_CFG0 0x30
  38. #define NAND_DEV1_CFG1 0x34
  39. #define NAND_READ_ID 0x40
  40. #define NAND_READ_STATUS 0x44
  41. #define NAND_DEV_CMD0 0xa0
  42. #define NAND_DEV_CMD1 0xa4
  43. #define NAND_DEV_CMD2 0xa8
  44. #define NAND_DEV_CMD_VLD 0xac
  45. #define SFLASHC_BURST_CFG 0xe0
  46. #define NAND_ERASED_CW_DETECT_CFG 0xe8
  47. #define NAND_ERASED_CW_DETECT_STATUS 0xec
  48. #define NAND_EBI2_ECC_BUF_CFG 0xf0
  49. #define FLASH_BUF_ACC 0x100
  50. #define NAND_CTRL 0xf00
  51. #define NAND_VERSION 0xf08
  52. #define NAND_READ_LOCATION_0 0xf20
  53. #define NAND_READ_LOCATION_1 0xf24
  54. /* dummy register offsets, used by write_reg_dma */
  55. #define NAND_DEV_CMD1_RESTORE 0xdead
  56. #define NAND_DEV_CMD_VLD_RESTORE 0xbeef
  57. /* NAND_FLASH_CMD bits */
  58. #define PAGE_ACC BIT(4)
  59. #define LAST_PAGE BIT(5)
  60. /* NAND_FLASH_CHIP_SELECT bits */
  61. #define NAND_DEV_SEL 0
  62. #define DM_EN BIT(2)
  63. /* NAND_FLASH_STATUS bits */
  64. #define FS_OP_ERR BIT(4)
  65. #define FS_READY_BSY_N BIT(5)
  66. #define FS_MPU_ERR BIT(8)
  67. #define FS_DEVICE_STS_ERR BIT(16)
  68. #define FS_DEVICE_WP BIT(23)
  69. /* NAND_BUFFER_STATUS bits */
  70. #define BS_UNCORRECTABLE_BIT BIT(8)
  71. #define BS_CORRECTABLE_ERR_MSK 0x1f
  72. /* NAND_DEVn_CFG0 bits */
  73. #define DISABLE_STATUS_AFTER_WRITE 4
  74. #define CW_PER_PAGE 6
  75. #define UD_SIZE_BYTES 9
  76. #define ECC_PARITY_SIZE_BYTES_RS 19
  77. #define SPARE_SIZE_BYTES 23
  78. #define NUM_ADDR_CYCLES 27
  79. #define STATUS_BFR_READ 30
  80. #define SET_RD_MODE_AFTER_STATUS 31
  81. /* NAND_DEVn_CFG1 bits */
  82. #define DEV0_CFG1_ECC_DISABLE 0
  83. #define WIDE_FLASH 1
  84. #define NAND_RECOVERY_CYCLES 2
  85. #define CS_ACTIVE_BSY 5
  86. #define BAD_BLOCK_BYTE_NUM 6
  87. #define BAD_BLOCK_IN_SPARE_AREA 16
  88. #define WR_RD_BSY_GAP 17
  89. #define ENABLE_BCH_ECC 27
  90. /* NAND_DEV0_ECC_CFG bits */
  91. #define ECC_CFG_ECC_DISABLE 0
  92. #define ECC_SW_RESET 1
  93. #define ECC_MODE 4
  94. #define ECC_PARITY_SIZE_BYTES_BCH 8
  95. #define ECC_NUM_DATA_BYTES 16
  96. #define ECC_FORCE_CLK_OPEN 30
  97. /* NAND_DEV_CMD1 bits */
  98. #define READ_ADDR 0
  99. /* NAND_DEV_CMD_VLD bits */
  100. #define READ_START_VLD 0
  101. /* NAND_EBI2_ECC_BUF_CFG bits */
  102. #define NUM_STEPS 0
  103. /* NAND_ERASED_CW_DETECT_CFG bits */
  104. #define ERASED_CW_ECC_MASK 1
  105. #define AUTO_DETECT_RES 0
  106. #define MASK_ECC (1 << ERASED_CW_ECC_MASK)
  107. #define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
  108. #define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
  109. #define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
  110. #define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
  111. /* NAND_ERASED_CW_DETECT_STATUS bits */
  112. #define PAGE_ALL_ERASED BIT(7)
  113. #define CODEWORD_ALL_ERASED BIT(6)
  114. #define PAGE_ERASED BIT(5)
  115. #define CODEWORD_ERASED BIT(4)
  116. #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
  117. #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
  118. /* Version Mask */
  119. #define NAND_VERSION_MAJOR_MASK 0xf0000000
  120. #define NAND_VERSION_MAJOR_SHIFT 28
  121. #define NAND_VERSION_MINOR_MASK 0x0fff0000
  122. #define NAND_VERSION_MINOR_SHIFT 16
  123. /* NAND OP_CMDs */
  124. #define PAGE_READ 0x2
  125. #define PAGE_READ_WITH_ECC 0x3
  126. #define PAGE_READ_WITH_ECC_SPARE 0x4
  127. #define PROGRAM_PAGE 0x6
  128. #define PAGE_PROGRAM_WITH_ECC 0x7
  129. #define PROGRAM_PAGE_SPARE 0x9
  130. #define BLOCK_ERASE 0xa
  131. #define FETCH_ID 0xb
  132. #define RESET_DEVICE 0xd
  133. /*
  134. * the NAND controller performs reads/writes with ECC in 516 byte chunks.
  135. * the driver calls the chunks 'step' or 'codeword' interchangeably
  136. */
  137. #define NANDC_STEP_SIZE 512
  138. /*
  139. * the largest page size we support is 8K, this will have 16 steps/codewords
  140. * of 512 bytes each
  141. */
  142. #define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
  143. /* we read at most 3 registers per codeword scan */
  144. #define MAX_REG_RD (3 * MAX_NUM_STEPS)
  145. /* ECC modes supported by the controller */
  146. #define ECC_NONE BIT(0)
  147. #define ECC_RS_4BIT BIT(1)
  148. #define ECC_BCH_4BIT BIT(2)
  149. #define ECC_BCH_8BIT BIT(3)
  150. struct desc_info {
  151. struct list_head node;
  152. enum dma_data_direction dir;
  153. struct scatterlist sgl;
  154. struct dma_async_tx_descriptor *dma_desc;
  155. };
  156. /*
  157. * holds the current register values that we want to write. acts as a contiguous
  158. * chunk of memory which we use to write the controller registers through DMA.
  159. */
  160. struct nandc_regs {
  161. __le32 cmd;
  162. __le32 addr0;
  163. __le32 addr1;
  164. __le32 chip_sel;
  165. __le32 exec;
  166. __le32 cfg0;
  167. __le32 cfg1;
  168. __le32 ecc_bch_cfg;
  169. __le32 clrflashstatus;
  170. __le32 clrreadstatus;
  171. __le32 cmd1;
  172. __le32 vld;
  173. __le32 orig_cmd1;
  174. __le32 orig_vld;
  175. __le32 ecc_buf_cfg;
  176. };
  177. /*
  178. * NAND controller data struct
  179. *
  180. * @controller: base controller structure
  181. * @host_list: list containing all the chips attached to the
  182. * controller
  183. * @dev: parent device
  184. * @base: MMIO base
  185. * @base_dma: physical base address of controller registers
  186. * @core_clk: controller clock
  187. * @aon_clk: another controller clock
  188. *
  189. * @chan: dma channel
  190. * @cmd_crci: ADM DMA CRCI for command flow control
  191. * @data_crci: ADM DMA CRCI for data flow control
  192. * @desc_list: DMA descriptor list (list of desc_infos)
  193. *
  194. * @data_buffer: our local DMA buffer for page read/writes,
  195. * used when we can't use the buffer provided
  196. * by upper layers directly
  197. * @buf_size/count/start: markers for chip->read_buf/write_buf functions
  198. * @reg_read_buf: local buffer for reading back registers via DMA
  199. * @reg_read_pos: marker for data read in reg_read_buf
  200. *
  201. * @regs: a contiguous chunk of memory for DMA register
  202. * writes. contains the register values to be
  203. * written to controller
  204. * @cmd1/vld: some fixed controller register values
  205. * @ecc_modes: supported ECC modes by the current controller,
  206. * initialized via DT match data
  207. */
  208. struct qcom_nand_controller {
  209. struct nand_hw_control controller;
  210. struct list_head host_list;
  211. struct device *dev;
  212. void __iomem *base;
  213. dma_addr_t base_dma;
  214. struct clk *core_clk;
  215. struct clk *aon_clk;
  216. struct dma_chan *chan;
  217. unsigned int cmd_crci;
  218. unsigned int data_crci;
  219. struct list_head desc_list;
  220. u8 *data_buffer;
  221. int buf_size;
  222. int buf_count;
  223. int buf_start;
  224. __le32 *reg_read_buf;
  225. int reg_read_pos;
  226. struct nandc_regs *regs;
  227. u32 cmd1, vld;
  228. u32 ecc_modes;
  229. };
  230. /*
  231. * NAND chip structure
  232. *
  233. * @chip: base NAND chip structure
  234. * @node: list node to add itself to host_list in
  235. * qcom_nand_controller
  236. *
  237. * @cs: chip select value for this chip
  238. * @cw_size: the number of bytes in a single step/codeword
  239. * of a page, consisting of all data, ecc, spare
  240. * and reserved bytes
  241. * @cw_data: the number of bytes within a codeword protected
  242. * by ECC
  243. * @use_ecc: request the controller to use ECC for the
  244. * upcoming read/write
  245. * @bch_enabled: flag to tell whether BCH ECC mode is used
  246. * @ecc_bytes_hw: ECC bytes used by controller hardware for this
  247. * chip
  248. * @status: value to be returned if NAND_CMD_STATUS command
  249. * is executed
  250. * @last_command: keeps track of last command on this chip. used
  251. * for reading correct status
  252. *
  253. * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
  254. * ecc/non-ecc mode for the current nand flash
  255. * device
  256. */
  257. struct qcom_nand_host {
  258. struct nand_chip chip;
  259. struct list_head node;
  260. int cs;
  261. int cw_size;
  262. int cw_data;
  263. bool use_ecc;
  264. bool bch_enabled;
  265. int ecc_bytes_hw;
  266. int spare_bytes;
  267. int bbm_size;
  268. u8 status;
  269. int last_command;
  270. u32 cfg0, cfg1;
  271. u32 cfg0_raw, cfg1_raw;
  272. u32 ecc_buf_cfg;
  273. u32 ecc_bch_cfg;
  274. u32 clrflashstatus;
  275. u32 clrreadstatus;
  276. };
  277. static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
  278. {
  279. return container_of(chip, struct qcom_nand_host, chip);
  280. }
  281. static inline struct qcom_nand_controller *
  282. get_qcom_nand_controller(struct nand_chip *chip)
  283. {
  284. return container_of(chip->controller, struct qcom_nand_controller,
  285. controller);
  286. }
  287. static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
  288. {
  289. return ioread32(nandc->base + offset);
  290. }
  291. static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
  292. u32 val)
  293. {
  294. iowrite32(val, nandc->base + offset);
  295. }
  296. static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
  297. {
  298. switch (offset) {
  299. case NAND_FLASH_CMD:
  300. return &regs->cmd;
  301. case NAND_ADDR0:
  302. return &regs->addr0;
  303. case NAND_ADDR1:
  304. return &regs->addr1;
  305. case NAND_FLASH_CHIP_SELECT:
  306. return &regs->chip_sel;
  307. case NAND_EXEC_CMD:
  308. return &regs->exec;
  309. case NAND_FLASH_STATUS:
  310. return &regs->clrflashstatus;
  311. case NAND_DEV0_CFG0:
  312. return &regs->cfg0;
  313. case NAND_DEV0_CFG1:
  314. return &regs->cfg1;
  315. case NAND_DEV0_ECC_CFG:
  316. return &regs->ecc_bch_cfg;
  317. case NAND_READ_STATUS:
  318. return &regs->clrreadstatus;
  319. case NAND_DEV_CMD1:
  320. return &regs->cmd1;
  321. case NAND_DEV_CMD1_RESTORE:
  322. return &regs->orig_cmd1;
  323. case NAND_DEV_CMD_VLD:
  324. return &regs->vld;
  325. case NAND_DEV_CMD_VLD_RESTORE:
  326. return &regs->orig_vld;
  327. case NAND_EBI2_ECC_BUF_CFG:
  328. return &regs->ecc_buf_cfg;
  329. default:
  330. return NULL;
  331. }
  332. }
  333. static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
  334. u32 val)
  335. {
  336. struct nandc_regs *regs = nandc->regs;
  337. __le32 *reg;
  338. reg = offset_to_nandc_reg(regs, offset);
  339. if (reg)
  340. *reg = cpu_to_le32(val);
  341. }
  342. /* helper to configure address register values */
  343. static void set_address(struct qcom_nand_host *host, u16 column, int page)
  344. {
  345. struct nand_chip *chip = &host->chip;
  346. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  347. if (chip->options & NAND_BUSWIDTH_16)
  348. column >>= 1;
  349. nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
  350. nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
  351. }
  352. /*
  353. * update_rw_regs: set up read/write register values, these will be
  354. * written to the NAND controller registers via DMA
  355. *
  356. * @num_cw: number of steps for the read/write operation
  357. * @read: read or write operation
  358. */
  359. static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
  360. {
  361. struct nand_chip *chip = &host->chip;
  362. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  363. u32 cmd, cfg0, cfg1, ecc_bch_cfg;
  364. if (read) {
  365. if (host->use_ecc)
  366. cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
  367. else
  368. cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
  369. } else {
  370. cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
  371. }
  372. if (host->use_ecc) {
  373. cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
  374. (num_cw - 1) << CW_PER_PAGE;
  375. cfg1 = host->cfg1;
  376. ecc_bch_cfg = host->ecc_bch_cfg;
  377. } else {
  378. cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
  379. (num_cw - 1) << CW_PER_PAGE;
  380. cfg1 = host->cfg1_raw;
  381. ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
  382. }
  383. nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
  384. nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
  385. nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
  386. nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
  387. nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
  388. nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
  389. nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
  390. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  391. }
  392. static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
  393. int reg_off, const void *vaddr, int size,
  394. bool flow_control)
  395. {
  396. struct desc_info *desc;
  397. struct dma_async_tx_descriptor *dma_desc;
  398. struct scatterlist *sgl;
  399. struct dma_slave_config slave_conf;
  400. enum dma_transfer_direction dir_eng;
  401. int ret;
  402. desc = kzalloc(sizeof(*desc), GFP_KERNEL);
  403. if (!desc)
  404. return -ENOMEM;
  405. sgl = &desc->sgl;
  406. sg_init_one(sgl, vaddr, size);
  407. if (read) {
  408. dir_eng = DMA_DEV_TO_MEM;
  409. desc->dir = DMA_FROM_DEVICE;
  410. } else {
  411. dir_eng = DMA_MEM_TO_DEV;
  412. desc->dir = DMA_TO_DEVICE;
  413. }
  414. ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
  415. if (ret == 0) {
  416. ret = -ENOMEM;
  417. goto err;
  418. }
  419. memset(&slave_conf, 0x00, sizeof(slave_conf));
  420. slave_conf.device_fc = flow_control;
  421. if (read) {
  422. slave_conf.src_maxburst = 16;
  423. slave_conf.src_addr = nandc->base_dma + reg_off;
  424. slave_conf.slave_id = nandc->data_crci;
  425. } else {
  426. slave_conf.dst_maxburst = 16;
  427. slave_conf.dst_addr = nandc->base_dma + reg_off;
  428. slave_conf.slave_id = nandc->cmd_crci;
  429. }
  430. ret = dmaengine_slave_config(nandc->chan, &slave_conf);
  431. if (ret) {
  432. dev_err(nandc->dev, "failed to configure dma channel\n");
  433. goto err;
  434. }
  435. dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
  436. if (!dma_desc) {
  437. dev_err(nandc->dev, "failed to prepare desc\n");
  438. ret = -EINVAL;
  439. goto err;
  440. }
  441. desc->dma_desc = dma_desc;
  442. list_add_tail(&desc->node, &nandc->desc_list);
  443. return 0;
  444. err:
  445. kfree(desc);
  446. return ret;
  447. }
  448. /*
  449. * read_reg_dma: prepares a descriptor to read a given number of
  450. * contiguous registers to the reg_read_buf pointer
  451. *
  452. * @first: offset of the first register in the contiguous block
  453. * @num_regs: number of registers to read
  454. */
  455. static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
  456. int num_regs)
  457. {
  458. bool flow_control = false;
  459. void *vaddr;
  460. int size;
  461. if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
  462. flow_control = true;
  463. size = num_regs * sizeof(u32);
  464. vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
  465. nandc->reg_read_pos += num_regs;
  466. return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
  467. }
  468. /*
  469. * write_reg_dma: prepares a descriptor to write a given number of
  470. * contiguous registers
  471. *
  472. * @first: offset of the first register in the contiguous block
  473. * @num_regs: number of registers to write
  474. */
  475. static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
  476. int num_regs)
  477. {
  478. bool flow_control = false;
  479. struct nandc_regs *regs = nandc->regs;
  480. void *vaddr;
  481. int size;
  482. vaddr = offset_to_nandc_reg(regs, first);
  483. if (first == NAND_FLASH_CMD)
  484. flow_control = true;
  485. if (first == NAND_DEV_CMD1_RESTORE)
  486. first = NAND_DEV_CMD1;
  487. if (first == NAND_DEV_CMD_VLD_RESTORE)
  488. first = NAND_DEV_CMD_VLD;
  489. size = num_regs * sizeof(u32);
  490. return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
  491. }
  492. /*
  493. * read_data_dma: prepares a DMA descriptor to transfer data from the
  494. * controller's internal buffer to the buffer 'vaddr'
  495. *
  496. * @reg_off: offset within the controller's data buffer
  497. * @vaddr: virtual address of the buffer we want to write to
  498. * @size: DMA transaction size in bytes
  499. */
  500. static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
  501. const u8 *vaddr, int size)
  502. {
  503. return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
  504. }
  505. /*
  506. * write_data_dma: prepares a DMA descriptor to transfer data from
  507. * 'vaddr' to the controller's internal buffer
  508. *
  509. * @reg_off: offset within the controller's data buffer
  510. * @vaddr: virtual address of the buffer we want to read from
  511. * @size: DMA transaction size in bytes
  512. */
  513. static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
  514. const u8 *vaddr, int size)
  515. {
  516. return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
  517. }
  518. /*
  519. * helper to prepare dma descriptors to configure registers needed for reading a
  520. * codeword/step in a page
  521. */
  522. static void config_cw_read(struct qcom_nand_controller *nandc)
  523. {
  524. write_reg_dma(nandc, NAND_FLASH_CMD, 3);
  525. write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
  526. write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
  527. write_reg_dma(nandc, NAND_EXEC_CMD, 1);
  528. read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
  529. read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
  530. }
  531. /*
  532. * helpers to prepare dma descriptors used to configure registers needed for
  533. * writing a codeword/step in a page
  534. */
  535. static void config_cw_write_pre(struct qcom_nand_controller *nandc)
  536. {
  537. write_reg_dma(nandc, NAND_FLASH_CMD, 3);
  538. write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
  539. write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
  540. }
  541. static void config_cw_write_post(struct qcom_nand_controller *nandc)
  542. {
  543. write_reg_dma(nandc, NAND_EXEC_CMD, 1);
  544. read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
  545. write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
  546. write_reg_dma(nandc, NAND_READ_STATUS, 1);
  547. }
  548. /*
  549. * the following functions are used within chip->cmdfunc() to perform different
  550. * NAND_CMD_* commands
  551. */
  552. /* sets up descriptors for NAND_CMD_PARAM */
  553. static int nandc_param(struct qcom_nand_host *host)
  554. {
  555. struct nand_chip *chip = &host->chip;
  556. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  557. /*
  558. * NAND_CMD_PARAM is called before we know much about the FLASH chip
  559. * in use. we configure the controller to perform a raw read of 512
  560. * bytes to read onfi params
  561. */
  562. nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
  563. nandc_set_reg(nandc, NAND_ADDR0, 0);
  564. nandc_set_reg(nandc, NAND_ADDR1, 0);
  565. nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
  566. | 512 << UD_SIZE_BYTES
  567. | 5 << NUM_ADDR_CYCLES
  568. | 0 << SPARE_SIZE_BYTES);
  569. nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
  570. | 0 << CS_ACTIVE_BSY
  571. | 17 << BAD_BLOCK_BYTE_NUM
  572. | 1 << BAD_BLOCK_IN_SPARE_AREA
  573. | 2 << WR_RD_BSY_GAP
  574. | 0 << WIDE_FLASH
  575. | 1 << DEV0_CFG1_ECC_DISABLE);
  576. nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
  577. /* configure CMD1 and VLD for ONFI param probing */
  578. nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
  579. (nandc->vld & ~(1 << READ_START_VLD))
  580. | 0 << READ_START_VLD);
  581. nandc_set_reg(nandc, NAND_DEV_CMD1,
  582. (nandc->cmd1 & ~(0xFF << READ_ADDR))
  583. | NAND_CMD_PARAM << READ_ADDR);
  584. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  585. nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
  586. nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
  587. write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
  588. write_reg_dma(nandc, NAND_DEV_CMD1, 1);
  589. nandc->buf_count = 512;
  590. memset(nandc->data_buffer, 0xff, nandc->buf_count);
  591. config_cw_read(nandc);
  592. read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
  593. nandc->buf_count);
  594. /* restore CMD1 and VLD regs */
  595. write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
  596. write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
  597. return 0;
  598. }
  599. /* sets up descriptors for NAND_CMD_ERASE1 */
  600. static int erase_block(struct qcom_nand_host *host, int page_addr)
  601. {
  602. struct nand_chip *chip = &host->chip;
  603. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  604. nandc_set_reg(nandc, NAND_FLASH_CMD,
  605. BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
  606. nandc_set_reg(nandc, NAND_ADDR0, page_addr);
  607. nandc_set_reg(nandc, NAND_ADDR1, 0);
  608. nandc_set_reg(nandc, NAND_DEV0_CFG0,
  609. host->cfg0_raw & ~(7 << CW_PER_PAGE));
  610. nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
  611. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  612. nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
  613. nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
  614. write_reg_dma(nandc, NAND_FLASH_CMD, 3);
  615. write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
  616. write_reg_dma(nandc, NAND_EXEC_CMD, 1);
  617. read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
  618. write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
  619. write_reg_dma(nandc, NAND_READ_STATUS, 1);
  620. return 0;
  621. }
  622. /* sets up descriptors for NAND_CMD_READID */
  623. static int read_id(struct qcom_nand_host *host, int column)
  624. {
  625. struct nand_chip *chip = &host->chip;
  626. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  627. if (column == -1)
  628. return 0;
  629. nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
  630. nandc_set_reg(nandc, NAND_ADDR0, column);
  631. nandc_set_reg(nandc, NAND_ADDR1, 0);
  632. nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
  633. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  634. write_reg_dma(nandc, NAND_FLASH_CMD, 4);
  635. write_reg_dma(nandc, NAND_EXEC_CMD, 1);
  636. read_reg_dma(nandc, NAND_READ_ID, 1);
  637. return 0;
  638. }
/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* program the reset opcode, then trigger it via EXEC_CMD */
	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	/* read back flash status so callers can inspect the result */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	return 0;
}
/* helpers to submit/free our list of dma descriptors */

/*
 * submit every queued descriptor to the dmaengine and synchronously wait
 * for the last one to complete (descriptors complete in submission order).
 * Returns 0 on success, -ETIMEDOUT if the transfer didn't finish.
 */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	/* waiting on the last cookie covers all earlier submissions */
	if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
		return -ETIMEDOUT;

	return 0;
}
/* unlink, unmap and free every descriptor queued on the controller */
static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	/* _safe variant because each node is deleted while iterating */
	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);
		dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
		kfree(desc);
	}
}
  671. /* reset the register read buffer for next NAND operation */
  672. static void clear_read_regs(struct qcom_nand_controller *nandc)
  673. {
  674. nandc->reg_read_pos = 0;
  675. memset(nandc->reg_read_buf, 0,
  676. MAX_REG_RD * sizeof(*nandc->reg_read_buf));
  677. }
/* common setup performed before dispatching any command in cmdfunc */
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* reset the small-data window used by read_byte()/read_buf() */
	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);
}
/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	/* a page program touches every codeword; an erase reports only one */
	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		/* an MPU error indicates the device is write protected */
		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		/*
		 * fail the operation on an op error in any codeword, or a
		 * device status error reported with the last codeword
		 */
		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
  711. static void post_command(struct qcom_nand_host *host, int command)
  712. {
  713. struct nand_chip *chip = &host->chip;
  714. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  715. switch (command) {
  716. case NAND_CMD_READID:
  717. memcpy(nandc->data_buffer, nandc->reg_read_buf,
  718. nandc->buf_count);
  719. break;
  720. case NAND_CMD_PAGEPROG:
  721. case NAND_CMD_ERASE1:
  722. parse_erase_write_errors(host, command);
  723. break;
  724. default:
  725. break;
  726. }
  727. }
/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	/* 'wait' marks commands that queued descriptors needing submission */
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		/* only set up registers; ecc->read_page() does the transfer */
		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		/* remember the target page; ecc->write_page() follows */
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);

		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
  795. /*
  796. * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
  797. * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
  798. *
  799. * when using RS ECC, the HW reports the same erros when reading an erased CW,
  800. * but it notifies that it is an erased CW by placing special characters at
  801. * certain offsets in the buffer.
  802. *
  803. * verify if the page is erased or not, and fix up the page for RS ECC by
  804. * replacing the special characters with 0xff.
  805. */
  806. static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
  807. {
  808. u8 empty1, empty2;
  809. /*
  810. * an erased page flags an error in NAND_FLASH_STATUS, check if the page
  811. * is erased by looking for 0x54s at offsets 3 and 175 from the
  812. * beginning of each codeword
  813. */
  814. empty1 = data_buf[3];
  815. empty2 = data_buf[175];
  816. /*
  817. * if the erased codework markers, if they exist override them with
  818. * 0xffs
  819. */
  820. if ((empty1 == 0x54 && empty2 == 0xff) ||
  821. (empty1 == 0xff && empty2 == 0x54)) {
  822. data_buf[3] = 0xff;
  823. data_buf[175] = 0xff;
  824. }
  825. /*
  826. * check if the entire chunk contains 0xffs or not. if it doesn't, then
  827. * restore the original values at the special offsets
  828. */
  829. if (memchr_inv(data_buf, 0xff, data_len)) {
  830. data_buf[3] = empty1;
  831. data_buf[175] = empty2;
  832. return false;
  833. }
  834. return true;
  835. }
/*
 * per-codeword status snapshot read back from the controller after a page
 * read; parse_read_errors() walks an array of these, one entry per step.
 * NOTE(review): presumably these mirror the FLASH_STATUS, BUFFER_STATUS and
 * ERASED_CW_DETECT_STATUS registers — confirm against the read config code.
 */
struct read_stats {
	__le32 flash;		/* checked for FS_OP_ERR / FS_MPU_ERR */
	__le32 buffer;		/* correctable count / uncorrectable bit */
	__le32 erased_cw;	/* erased-codeword detection (BCH mode) */
};
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 *
 * Returns the maximum number of bitflips seen in any codeword (what
 * ecc->read_page() must report) and updates mtd->ecc_stats along the way.
 * data_buf/oob_buf must match whatever was passed to read_page_ecc().
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	/* the controller dumped one read_stats record per codeword */
	buf = (struct read_stats *)nandc->reg_read_buf;

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* the last codeword carries less data plus the free oob */
		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* ignore erased codeword errors */
			if (host->bch_enabled) {
				/* BCH: HW flags erased CWs explicitly */
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				/* RS: detect (and fix up) the 0x54 markers */
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				/* still advance both buffers before skipping */
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * make sure it isn't an erased page reported
				 * as not-erased by HW because of a few bitflips
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			/* no error: HW corrected 'stat' bitflips itself */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 *
 * either data_buf or oob_buf may be NULL when that part isn't wanted.
 * Returns 0 on success or the submit_descs() error code.
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* the last codeword has less data plus the free oob bytes */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	/* raw mode reads the full codeword, ECC mode only the data part */
	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	/* point the controller at the start of the last codeword */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
  994. /* implements ecc->read_page() */
  995. static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
  996. uint8_t *buf, int oob_required, int page)
  997. {
  998. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  999. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1000. u8 *data_buf, *oob_buf = NULL;
  1001. int ret;
  1002. data_buf = buf;
  1003. oob_buf = oob_required ? chip->oob_poi : NULL;
  1004. ret = read_page_ecc(host, data_buf, oob_buf);
  1005. if (ret) {
  1006. dev_err(nandc->dev, "failure to read page\n");
  1007. return ret;
  1008. }
  1009. return parse_read_errors(host, data_buf, oob_buf);
  1010. }
  1011. /* implements ecc->read_page_raw() */
  1012. static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
  1013. struct nand_chip *chip, uint8_t *buf,
  1014. int oob_required, int page)
  1015. {
  1016. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1017. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1018. u8 *data_buf, *oob_buf;
  1019. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1020. int i, ret;
  1021. data_buf = buf;
  1022. oob_buf = chip->oob_poi;
  1023. host->use_ecc = false;
  1024. update_rw_regs(host, ecc->steps, true);
  1025. for (i = 0; i < ecc->steps; i++) {
  1026. int data_size1, data_size2, oob_size1, oob_size2;
  1027. int reg_off = FLASH_BUF_ACC;
  1028. data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
  1029. oob_size1 = host->bbm_size;
  1030. if (i == (ecc->steps - 1)) {
  1031. data_size2 = ecc->size - data_size1 -
  1032. ((ecc->steps - 1) << 2);
  1033. oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
  1034. host->spare_bytes;
  1035. } else {
  1036. data_size2 = host->cw_data - data_size1;
  1037. oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
  1038. }
  1039. config_cw_read(nandc);
  1040. read_data_dma(nandc, reg_off, data_buf, data_size1);
  1041. reg_off += data_size1;
  1042. data_buf += data_size1;
  1043. read_data_dma(nandc, reg_off, oob_buf, oob_size1);
  1044. reg_off += oob_size1;
  1045. oob_buf += oob_size1;
  1046. read_data_dma(nandc, reg_off, data_buf, data_size2);
  1047. reg_off += data_size2;
  1048. data_buf += data_size2;
  1049. read_data_dma(nandc, reg_off, oob_buf, oob_size2);
  1050. oob_buf += oob_size2;
  1051. }
  1052. ret = submit_descs(nandc);
  1053. if (ret)
  1054. dev_err(nandc->dev, "failure to read raw page\n");
  1055. free_descs(nandc);
  1056. return 0;
  1057. }
/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	clear_read_regs(nandc);

	/* read the whole page with ECC, but keep only the oob bytes */
	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	ret = read_page_ecc(host, NULL, chip->oob_poi);
	if (ret)
		dev_err(nandc->dev, "failure to read oob\n");

	return ret;
}
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	/* program the rw registers for an ECC write (false == write) */
	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* last codeword: less data, and its oob gets written too */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size);
		}

		config_cw_write_post(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	return ret;
}
/*
 * implements ecc->write_page_raw()
 *
 * mirror of qcom_nandc_read_page_raw(): each codeword is written in four
 * pieces (data1 / bbm / data2 / spare+ecc) so that a raw write of a raw
 * read round-trips the page unchanged.
 */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	/* raw write: ECC block disabled */
	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		/* data before the BBM position, same for every codeword */
		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			/* last codeword: remaining data plus free oob area */
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, reg_off, data_buf, data_size1);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2);
		oob_buf += oob_size2;

		config_cw_write_post(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	return ret;
}
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only oob within a codeword,
 * since ecc is calculated for the combined codeword. we first copy the
 * entire contents for the last codeword(data + oob), replace the old oob
 * with the new one in chip->oob_poi, and then write the entire codeword.
 * this read-copy-write operation results in a slight performance loss.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int free_boff;
	int data_size, oob_size;
	int ret, status = 0;

	host->use_ecc = true;

	/* read the current last codeword into nandc->data_buffer */
	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = ecc->steps << 2;

	/* the free oob bytes live at this offset within chip->oob_poi */
	free_boff = ecc->layout->oobfree[0].offset;

	/* override new oob content to last codeword */
	memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		       data_size + oob_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	/* commit the page program and check the resulting status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/*
 * implements chip->block_bad(): reads the bad block marker of the block
 * containing @ofs and returns non-zero when the block is marked bad.
 * NOTE(review): on a read failure this falls through to 'err' and returns
 * bad == 0, i.e. the block is reported good — confirm this is intended.
 */
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;
	u32 flash_status;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contains the BBM
	 */
	host->use_ecc = false;

	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);

	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	/* the BBM sits right after the last codeword's data bytes */
	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	/* 16-bit bus devices carry a two-byte marker */
	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}
/*
 * implements chip->block_markbad(): marks the block containing @ofs bad by
 * raw-writing zeroes over the entire last codeword (which holds the BBM).
 */
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, status = 0;

	clear_read_regs(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	/* commit the page program and check the resulting status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
  1281. /*
  1282. * the three functions below implement chip->read_byte(), chip->read_buf()
  1283. * and chip->write_buf() respectively. these aren't used for
  1284. * reading/writing page data, they are used for smaller data like reading
  1285. * id, status etc
  1286. */
  1287. static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
  1288. {
  1289. struct nand_chip *chip = mtd_to_nand(mtd);
  1290. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1291. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1292. u8 *buf = nandc->data_buffer;
  1293. u8 ret = 0x0;
  1294. if (host->last_command == NAND_CMD_STATUS) {
  1295. ret = host->status;
  1296. host->status = NAND_STATUS_READY | NAND_STATUS_WP;
  1297. return ret;
  1298. }
  1299. if (nandc->buf_start < nandc->buf_count)
  1300. ret = buf[nandc->buf_start++];
  1301. return ret;
  1302. }
  1303. static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  1304. {
  1305. struct nand_chip *chip = mtd_to_nand(mtd);
  1306. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1307. int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
  1308. memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
  1309. nandc->buf_start += real_len;
  1310. }
  1311. static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
  1312. int len)
  1313. {
  1314. struct nand_chip *chip = mtd_to_nand(mtd);
  1315. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1316. int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
  1317. memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
  1318. nandc->buf_start += real_len;
  1319. }
  1320. /* we support only one external chip for now */
  1321. static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
  1322. {
  1323. struct nand_chip *chip = mtd_to_nand(mtd);
  1324. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1325. if (chipnr <= 0)
  1326. return;
  1327. dev_warn(nandc->dev, "invalid chip select\n");
  1328. }
  1329. /*
  1330. * NAND controller page layout info
  1331. *
  1332. * Layout with ECC enabled:
  1333. *
  1334. * |----------------------| |---------------------------------|
  1335. * | xx.......yy| | *********xx.......yy|
  1336. * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
  1337. * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
  1338. * | xx.......yy| | *********xx.......yy|
  1339. * |----------------------| |---------------------------------|
  1340. * codeword 1,2..n-1 codeword n
  1341. * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
  1342. *
  1343. * n = Number of codewords in the page
  1344. * . = ECC bytes
  1345. * * = Spare/free bytes
  1346. * x = Unused byte(s)
  1347. * y = Reserved byte(s)
  1348. *
  1349. * 2K page: n = 4, spare = 16 bytes
  1350. * 4K page: n = 8, spare = 32 bytes
  1351. * 8K page: n = 16, spare = 64 bytes
  1352. *
  1353. * the qcom nand controller operates at a sub page/codeword level. each
  1354. * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
  1355. * the number of ECC bytes vary based on the ECC strength and the bus width.
  1356. *
  1357. * the first n - 1 codewords contains 516 bytes of user data, the remaining
  1358. * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
  1359. * both user data and spare(oobavail) bytes that sum up to 516 bytes.
  1360. *
  1361. * When we access a page with ECC enabled, the reserved bytes(s) are not
  1362. * accessible at all. When reading, we fill up these unreadable positions
  1363. * with 0xffs. When writing, the controller skips writing the inaccessible
  1364. * bytes.
  1365. *
  1366. * Layout with ECC disabled:
  1367. *
  1368. * |------------------------------| |---------------------------------------|
  1369. * | yy xx.......| | bb *********xx.......|
  1370. * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
  1371. * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
  1372. * | yy xx.......| | bb *********xx.......|
  1373. * |------------------------------| |---------------------------------------|
  1374. * codeword 1,2..n-1 codeword n
  1375. * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
  1376. *
  1377. * n = Number of codewords in the page
  1378. * . = ECC bytes
  1379. * * = Spare/free bytes
  1380. * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
  1382. * b = Real Bad Block byte(s)
  1383. * size1/size2 = function of codeword size and 'n'
  1384. *
  1385. * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
  1386. * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
  1387. * Block Markers. In the last codeword, this position contains the real BBM
  1388. *
  1389. * In order to have a consistent layout between RAW and ECC modes, we assume
  1390. * the following OOB layout arrangement:
  1391. *
  1392. * |-----------| |--------------------|
  1393. * |yyxx.......| |bb*********xx.......|
  1394. * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
  1395. * |yyxx.......| |bb*********xx.......|
  1396. * |yyxx.......| |bb*********xx.......|
  1397. * |-----------| |--------------------|
  1398. * first n - 1 nth OOB region
  1399. * OOB regions
  1400. *
  1401. * n = Number of codewords in the page
  1402. * . = ECC bytes
  1403. * * = FREE OOB bytes
  1404. * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
  1405. * x = Unused byte(s)
  1406. * b = Real bad block byte(s) (inaccessible when ECC enabled)
  1407. *
  1408. * This layout is read as is when ECC is disabled. When ECC is enabled, the
  1409. * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
  1410. * and assumed as 0xffs when we read a page/oob. The ECC, unused and
  1411. * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout (i.e,
  1412. * ecc->bytes is the sum of the three).
  1413. */
/*
 * builds the nand_ecclayout described in the big comment above: all ECC,
 * unused and BBM bytes are reported as 'ecc' positions, the free oob bytes
 * of the last codeword form the single oobfree region.
 * Returns NULL on allocation failure (devm-managed memory otherwise).
 */
static struct nand_ecclayout *
qcom_nand_create_layout(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct nand_ecclayout *layout;
	int i, j, steps, pos = 0, shift = 0;

	layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL);
	if (!layout)
		return NULL;

	steps = mtd->writesize / ecc->size;
	layout->eccbytes = steps * ecc->bytes;

	/* free oob: right after the last codeword's BBM byte(s) */
	layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size;
	layout->oobfree[0].length = steps << 2;

	/*
	 * the oob bytes in the first n - 1 codewords are all grouped together
	 * in the format:
	 * DUMMY_BBM + UNUSED + ECC
	 */
	for (i = 0; i < steps - 1; i++) {
		for (j = 0; j < ecc->bytes; j++)
			layout->eccpos[pos++] = i * ecc->bytes + j;
	}

	/*
	 * the oob bytes in the last codeword are grouped in the format:
	 * BBM + FREE OOB + UNUSED + ECC
	 */

	/* fill up the bbm positions */
	for (j = 0; j < host->bbm_size; j++)
		layout->eccpos[pos++] = i * ecc->bytes + j;

	/*
	 * fill up the ecc and reserved positions, their indices are offset
	 * by the free oob region
	 */
	shift = layout->oobfree[0].length + host->bbm_size;

	for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++)
		layout->eccpos[pos++] = i * ecc->bytes + shift + j;

	return layout;
}
  1455. static int qcom_nand_host_setup(struct qcom_nand_host *host)
  1456. {
  1457. struct nand_chip *chip = &host->chip;
  1458. struct mtd_info *mtd = nand_to_mtd(chip);
  1459. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1460. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1461. int cwperpage, bad_block_byte;
  1462. bool wide_bus;
  1463. int ecc_mode = 1;
  1464. /*
  1465. * the controller requires each step consists of 512 bytes of data.
  1466. * bail out if DT has populated a wrong step size.
  1467. */
  1468. if (ecc->size != NANDC_STEP_SIZE) {
  1469. dev_err(nandc->dev, "invalid ecc size\n");
  1470. return -EINVAL;
  1471. }
  1472. wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
  1473. if (ecc->strength >= 8) {
  1474. /* 8 bit ECC defaults to BCH ECC on all platforms */
  1475. host->bch_enabled = true;
  1476. ecc_mode = 1;
  1477. if (wide_bus) {
  1478. host->ecc_bytes_hw = 14;
  1479. host->spare_bytes = 0;
  1480. host->bbm_size = 2;
  1481. } else {
  1482. host->ecc_bytes_hw = 13;
  1483. host->spare_bytes = 2;
  1484. host->bbm_size = 1;
  1485. }
  1486. } else {
  1487. /*
  1488. * if the controller supports BCH for 4 bit ECC, the controller
  1489. * uses lesser bytes for ECC. If RS is used, the ECC bytes is
  1490. * always 10 bytes
  1491. */
  1492. if (nandc->ecc_modes & ECC_BCH_4BIT) {
  1493. /* BCH */
  1494. host->bch_enabled = true;
  1495. ecc_mode = 0;
  1496. if (wide_bus) {
  1497. host->ecc_bytes_hw = 8;
  1498. host->spare_bytes = 2;
  1499. host->bbm_size = 2;
  1500. } else {
  1501. host->ecc_bytes_hw = 7;
  1502. host->spare_bytes = 4;
  1503. host->bbm_size = 1;
  1504. }
  1505. } else {
  1506. /* RS */
  1507. host->ecc_bytes_hw = 10;
  1508. if (wide_bus) {
  1509. host->spare_bytes = 0;
  1510. host->bbm_size = 2;
  1511. } else {
  1512. host->spare_bytes = 1;
  1513. host->bbm_size = 1;
  1514. }
  1515. }
  1516. }
  1517. /*
  1518. * we consider ecc->bytes as the sum of all the non-data content in a
  1519. * step. It gives us a clean representation of the oob area (even if
  1520. * all the bytes aren't used for ECC).It is always 16 bytes for 8 bit
  1521. * ECC and 12 bytes for 4 bit ECC
  1522. */
  1523. ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
  1524. ecc->read_page = qcom_nandc_read_page;
  1525. ecc->read_page_raw = qcom_nandc_read_page_raw;
  1526. ecc->read_oob = qcom_nandc_read_oob;
  1527. ecc->write_page = qcom_nandc_write_page;
  1528. ecc->write_page_raw = qcom_nandc_write_page_raw;
  1529. ecc->write_oob = qcom_nandc_write_oob;
  1530. ecc->mode = NAND_ECC_HW;
  1531. ecc->layout = qcom_nand_create_layout(host);
  1532. if (!ecc->layout)
  1533. return -ENOMEM;
  1534. cwperpage = mtd->writesize / ecc->size;
  1535. /*
  1536. * DATA_UD_BYTES varies based on whether the read/write command protects
  1537. * spare data with ECC too. We protect spare data by default, so we set
  1538. * it to main + spare data, which are 512 and 4 bytes respectively.
  1539. */
  1540. host->cw_data = 516;
  1541. /*
  1542. * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
  1543. * for 8 bit ECC
  1544. */
  1545. host->cw_size = host->cw_data + ecc->bytes;
  1546. if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
  1547. dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
  1548. return -EINVAL;
  1549. }
  1550. bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
  1551. host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
  1552. | host->cw_data << UD_SIZE_BYTES
  1553. | 0 << DISABLE_STATUS_AFTER_WRITE
  1554. | 5 << NUM_ADDR_CYCLES
  1555. | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
  1556. | 0 << STATUS_BFR_READ
  1557. | 1 << SET_RD_MODE_AFTER_STATUS
  1558. | host->spare_bytes << SPARE_SIZE_BYTES;
  1559. host->cfg1 = 7 << NAND_RECOVERY_CYCLES
  1560. | 0 << CS_ACTIVE_BSY
  1561. | bad_block_byte << BAD_BLOCK_BYTE_NUM
  1562. | 0 << BAD_BLOCK_IN_SPARE_AREA
  1563. | 2 << WR_RD_BSY_GAP
  1564. | wide_bus << WIDE_FLASH
  1565. | host->bch_enabled << ENABLE_BCH_ECC;
  1566. host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
  1567. | host->cw_size << UD_SIZE_BYTES
  1568. | 5 << NUM_ADDR_CYCLES
  1569. | 0 << SPARE_SIZE_BYTES;
  1570. host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
  1571. | 0 << CS_ACTIVE_BSY
  1572. | 17 << BAD_BLOCK_BYTE_NUM
  1573. | 1 << BAD_BLOCK_IN_SPARE_AREA
  1574. | 2 << WR_RD_BSY_GAP
  1575. | wide_bus << WIDE_FLASH
  1576. | 1 << DEV0_CFG1_ECC_DISABLE;
  1577. host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
  1578. | 0 << ECC_SW_RESET
  1579. | host->cw_data << ECC_NUM_DATA_BYTES
  1580. | 1 << ECC_FORCE_CLK_OPEN
  1581. | ecc_mode << ECC_MODE
  1582. | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
  1583. host->ecc_buf_cfg = 0x203 << NUM_STEPS;
  1584. host->clrflashstatus = FS_READY_BSY_N;
  1585. host->clrreadstatus = 0xc0;
  1586. dev_dbg(nandc->dev,
  1587. "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
  1588. host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
  1589. host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
  1590. cwperpage);
  1591. return 0;
  1592. }
  1593. static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
  1594. {
  1595. int ret;
  1596. ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
  1597. if (ret) {
  1598. dev_err(nandc->dev, "failed to set DMA mask\n");
  1599. return ret;
  1600. }
  1601. /*
  1602. * we use the internal buffer for reading ONFI params, reading small
  1603. * data like ID and status, and preforming read-copy-write operations
  1604. * when writing to a codeword partially. 532 is the maximum possible
  1605. * size of a codeword for our nand controller
  1606. */
  1607. nandc->buf_size = 532;
  1608. nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
  1609. GFP_KERNEL);
  1610. if (!nandc->data_buffer)
  1611. return -ENOMEM;
  1612. nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
  1613. GFP_KERNEL);
  1614. if (!nandc->regs)
  1615. return -ENOMEM;
  1616. nandc->reg_read_buf = devm_kzalloc(nandc->dev,
  1617. MAX_REG_RD * sizeof(*nandc->reg_read_buf),
  1618. GFP_KERNEL);
  1619. if (!nandc->reg_read_buf)
  1620. return -ENOMEM;
  1621. nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
  1622. if (!nandc->chan) {
  1623. dev_err(nandc->dev, "failed to request slave channel\n");
  1624. return -ENODEV;
  1625. }
  1626. INIT_LIST_HEAD(&nandc->desc_list);
  1627. INIT_LIST_HEAD(&nandc->host_list);
  1628. spin_lock_init(&nandc->controller.lock);
  1629. init_waitqueue_head(&nandc->controller.wq);
  1630. return 0;
  1631. }
/*
 * Release the resources not covered by devm — currently only the DMA
 * channel acquired in qcom_nandc_alloc().
 */
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	dma_release_channel(nandc->chan);
}
/*
 * One-time setup of a few nand controller registers, done once at probe
 * before any chip is initialized.
 */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);

	/* enable ADM DMA */
	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);

	/*
	 * save the original values of these registers — presumably so the
	 * command paths can restore/derive them later; confirm against the
	 * users of nandc->cmd1/vld
	 */
	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);

	return 0;
}
  1648. static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
  1649. struct qcom_nand_host *host,
  1650. struct device_node *dn)
  1651. {
  1652. struct nand_chip *chip = &host->chip;
  1653. struct mtd_info *mtd = nand_to_mtd(chip);
  1654. struct device *dev = nandc->dev;
  1655. int ret;
  1656. ret = of_property_read_u32(dn, "reg", &host->cs);
  1657. if (ret) {
  1658. dev_err(dev, "can't get chip-select\n");
  1659. return -ENXIO;
  1660. }
  1661. nand_set_flash_node(chip, dn);
  1662. mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
  1663. mtd->owner = THIS_MODULE;
  1664. mtd->dev.parent = dev;
  1665. chip->cmdfunc = qcom_nandc_command;
  1666. chip->select_chip = qcom_nandc_select_chip;
  1667. chip->read_byte = qcom_nandc_read_byte;
  1668. chip->read_buf = qcom_nandc_read_buf;
  1669. chip->write_buf = qcom_nandc_write_buf;
  1670. /*
  1671. * the bad block marker is readable only when we read the last codeword
  1672. * of a page with ECC disabled. currently, the nand_base and nand_bbt
  1673. * helpers don't allow us to read BB from a nand chip with ECC
  1674. * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
  1675. * and block_markbad helpers until we permanently switch to using
  1676. * MTD_OPS_RAW for all drivers (with the help of badblockbits)
  1677. */
  1678. chip->block_bad = qcom_nandc_block_bad;
  1679. chip->block_markbad = qcom_nandc_block_markbad;
  1680. chip->controller = &nandc->controller;
  1681. chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
  1682. NAND_SKIP_BBTSCAN;
  1683. /* set up initial status value */
  1684. host->status = NAND_STATUS_READY | NAND_STATUS_WP;
  1685. ret = nand_scan_ident(mtd, 1, NULL);
  1686. if (ret)
  1687. return ret;
  1688. ret = qcom_nand_host_setup(host);
  1689. if (ret)
  1690. return ret;
  1691. ret = nand_scan_tail(mtd);
  1692. if (ret)
  1693. return ret;
  1694. return mtd_device_register(mtd, NULL, 0);
  1695. }
  1696. /* parse custom DT properties here */
  1697. static int qcom_nandc_parse_dt(struct platform_device *pdev)
  1698. {
  1699. struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
  1700. struct device_node *np = nandc->dev->of_node;
  1701. int ret;
  1702. ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
  1703. if (ret) {
  1704. dev_err(nandc->dev, "command CRCI unspecified\n");
  1705. return ret;
  1706. }
  1707. ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
  1708. if (ret) {
  1709. dev_err(nandc->dev, "data CRCI unspecified\n");
  1710. return ret;
  1711. }
  1712. return 0;
  1713. }
/*
 * qcom_nandc_probe() - bind the controller and initialize every chip
 *
 * Maps registers, grabs clocks, allocates controller resources, then walks
 * the "qcom,nandcs" children initializing one qcom_nand_host per chip.
 * A chip that fails init is skipped (best effort); probing fails only if
 * no chip comes up at all. The error labels unwind in strict reverse
 * order of acquisition.
 */
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	struct qcom_nand_host *host;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	/* match data encodes the ECC modes this controller variant supports */
	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->ecc_modes = (unsigned long)dev_data;

	/* devm_ioremap_resource() rejects a NULL res before we touch it */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	/* bus address of the register block, used for ADM DMA descriptors */
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	/* one qcom_nand_host per "qcom,nandcs" child (chip-select) */
	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "qcom,nandcs")) {
			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				/* drop the iterator's child reference on abort */
				of_node_put(child);
				ret = -ENOMEM;
				goto err_cs_init;
			}

			/* a failed chip is skipped, not fatal */
			ret = qcom_nand_host_init(nandc, host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue;
			}

			list_add_tail(&host->node, &nandc->host_list);
		}
	}

	if (list_empty(&nandc->host_list)) {
		ret = -ENODEV;
		goto err_cs_init;
	}

	return 0;

err_cs_init:
	/* release any chips that did register before the failure */
	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));
err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
/*
 * Unbind: tear down every registered chip, then release controller
 * resources and clocks — the reverse of the probe sequence.
 */
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}
/* ECC modes supported by the EBI2 (ipq806x) controller variant */
#define EBI2_NANDC_ECC_MODES	(ECC_RS_4BIT | ECC_BCH_8BIT)

/*
 * data will hold a struct pointer containing more differences once we support
 * more controller variants
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{ .compatible = "qcom,ipq806x-nand",
	  .data = (void *)EBI2_NANDC_ECC_MODES,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");