sunxi_nand.c

/*
 * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
 *
 * Derived from:
 *	https://github.com/yuq/sunxi-nfc-mtd
 *	Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
 *
 *	https://github.com/hno/Allwinner-Info
 *	Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
 *
 *	Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
 *	Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/reset.h>

#define NFC_REG_CTL		0x0000
#define NFC_REG_ST		0x0004
#define NFC_REG_INT		0x0008
#define NFC_REG_TIMING_CTL	0x000C
#define NFC_REG_TIMING_CFG	0x0010
#define NFC_REG_ADDR_LOW	0x0014
#define NFC_REG_ADDR_HIGH	0x0018
#define NFC_REG_SECTOR_NUM	0x001C
#define NFC_REG_CNT		0x0020
#define NFC_REG_CMD		0x0024
#define NFC_REG_RCMD_SET	0x0028
#define NFC_REG_WCMD_SET	0x002C
#define NFC_REG_IO_DATA		0x0030
#define NFC_REG_ECC_CTL		0x0034
#define NFC_REG_ECC_ST		0x0038
#define NFC_REG_DEBUG		0x003C
#define NFC_REG_ECC_ERR_CNT(x)	((0x0040 + (x)) & ~0x3)
#define NFC_REG_USER_DATA(x)	(0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA	0x00A0
#define NFC_REG_PAT_ID		0x00A4
#define NFC_RAM0_BASE		0x0400
#define NFC_RAM1_BASE		0x0800

/* define bit use in NFC_CTL */
#define NFC_EN			BIT(0)
#define NFC_RESET		BIT(1)
#define NFC_BUS_WIDTH_MSK	BIT(2)
#define NFC_BUS_WIDTH_8		(0 << 2)
#define NFC_BUS_WIDTH_16	(1 << 2)
#define NFC_RB_SEL_MSK		BIT(3)
#define NFC_RB_SEL(x)		((x) << 3)
#define NFC_CE_SEL_MSK		GENMASK(26, 24)
#define NFC_CE_SEL(x)		((x) << 24)
#define NFC_CE_CTL		BIT(6)
#define NFC_PAGE_SHIFT_MSK	GENMASK(11, 8)
#define NFC_PAGE_SHIFT(x)	(((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM			BIT(12)
#define NFC_RAM_METHOD		BIT(14)
#define NFC_DEBUG_CTL		BIT(31)

/* define bit use in NFC_ST */
#define NFC_RB_B2R		BIT(0)
#define NFC_CMD_INT_FLAG	BIT(1)
#define NFC_DMA_INT_FLAG	BIT(2)
#define NFC_CMD_FIFO_STATUS	BIT(3)
#define NFC_STA			BIT(4)
#define NFC_NATCH_INT_FLAG	BIT(5)
#define NFC_RB_STATE(x)		BIT(x + 8)

/* define bit use in NFC_INT */
#define NFC_B2R_INT_ENABLE	BIT(0)
#define NFC_CMD_INT_ENABLE	BIT(1)
#define NFC_DMA_INT_ENABLE	BIT(2)
#define NFC_INT_MASK		(NFC_B2R_INT_ENABLE | \
				 NFC_CMD_INT_ENABLE | \
				 NFC_DMA_INT_ENABLE)

/* define bit use in NFC_TIMING_CTL */
#define NFC_TIMING_CTL_EDO	BIT(8)

/* define NFC_TIMING_CFG register layout */
#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD)		\
	(((tWB) & 0x3) | (((tADL) & 0x3) << 2) |		\
	 (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) |	\
	 (((tCAD) & 0x7) << 8))

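/*
 * Illustrative sketch (not part of the driver): NFC_TIMING_CFG() only packs
 * the five timing fields into their bit positions. Assuming the hypothetical
 * values tWB=1, tADL=2, tWHR=0, tRHW=1 and tCAD=3:
 *
 *	u32 cfg = NFC_TIMING_CFG(1, 2, 0, 1, 3);
 *	// cfg == 0x1 | (0x2 << 2) | (0x0 << 4) | (0x1 << 6) | (0x3 << 8)
 *	//     == 0x349
 */
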
/* define bit use in NFC_CMD */
#define NFC_CMD_LOW_BYTE_MSK	GENMASK(7, 0)
#define NFC_CMD_HIGH_BYTE_MSK	GENMASK(15, 8)
#define NFC_CMD(x)		(x)
#define NFC_ADR_NUM_MSK		GENMASK(18, 16)
#define NFC_ADR_NUM(x)		(((x) - 1) << 16)
#define NFC_SEND_ADR		BIT(19)
#define NFC_ACCESS_DIR		BIT(20)
#define NFC_DATA_TRANS		BIT(21)
#define NFC_SEND_CMD1		BIT(22)
#define NFC_WAIT_FLAG		BIT(23)
#define NFC_SEND_CMD2		BIT(24)
#define NFC_SEQ			BIT(25)
#define NFC_DATA_SWAP_METHOD	BIT(26)
#define NFC_ROW_AUTO_INC	BIT(27)
#define NFC_SEND_CMD3		BIT(28)
#define NFC_SEND_CMD4		BIT(29)
#define NFC_CMD_TYPE_MSK	GENMASK(31, 30)
#define NFC_NORMAL_OP		(0 << 30)
#define NFC_ECC_OP		(1 << 30)
#define NFC_PAGE_OP		(2 << 30)

/* define bit use in NFC_RCMD_SET */
#define NFC_READ_CMD_MSK	GENMASK(7, 0)
#define NFC_RND_READ_CMD0_MSK	GENMASK(15, 8)
#define NFC_RND_READ_CMD1_MSK	GENMASK(23, 16)

/* define bit use in NFC_WCMD_SET */
#define NFC_PROGRAM_CMD_MSK	GENMASK(7, 0)
#define NFC_RND_WRITE_CMD_MSK	GENMASK(15, 8)
#define NFC_READ_CMD0_MSK	GENMASK(23, 16)
#define NFC_READ_CMD1_MSK	GENMASK(31, 24)

/* define bit use in NFC_ECC_CTL */
#define NFC_ECC_EN		BIT(0)
#define NFC_ECC_PIPELINE	BIT(3)
#define NFC_ECC_EXCEPTION	BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK	BIT(5)
#define NFC_ECC_BLOCK_512	BIT(5)
#define NFC_RANDOM_EN		BIT(9)
#define NFC_RANDOM_DIRECTION	BIT(10)
#define NFC_ECC_MODE_MSK	GENMASK(15, 12)
#define NFC_ECC_MODE(x)		((x) << 12)
#define NFC_RANDOM_SEED_MSK	GENMASK(30, 16)
#define NFC_RANDOM_SEED(x)	((x) << 16)

/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x)		BIT(x)
#define NFC_ECC_ERR_MSK		GENMASK(15, 0)
#define NFC_ECC_PAT_FOUND(x)	BIT(x + 16)
#define NFC_ECC_ERR_CNT(b, x)	(((x) >> (((b) % 4) * 8)) & 0xff)

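/*
 * Illustrative sketch (not part of the driver): each NFC_REG_ECC_ERR_CNT()
 * word packs four 8-bit per-step error counters, so for a hypothetical
 * step 5 the register offset and byte lane resolve as:
 *
 *	NFC_REG_ECC_ERR_CNT(5);		// (0x0040 + 5) & ~0x3 == 0x0044
 *	NFC_ECC_ERR_CNT(5, val);	// ((val) >> 8) & 0xff, since 5 % 4 == 1
 */
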
#define NFC_DEFAULT_TIMEOUT_MS	1000

#define NFC_SRAM_SIZE		1024

#define NFC_MAX_CS		7

/*
 * Ready/Busy detection type: describes the Ready/Busy detection modes
 *
 * @RB_NONE:	no external detection available, rely on STATUS command
 *		and software timeouts
 * @RB_NATIVE:	use sunxi NAND controller Ready/Busy support. The Ready/Busy
 *		pin of the NAND flash chip must be connected to one of the
 *		native NAND R/B pins (those which can be muxed to the NAND
 *		Controller)
 * @RB_GPIO:	use a simple GPIO to handle Ready/Busy status. The Ready/Busy
 *		pin of the NAND flash chip must be connected to a GPIO capable
 *		pin.
 */
enum sunxi_nand_rb_type {
	RB_NONE,
	RB_NATIVE,
	RB_GPIO,
};

/*
 * Ready/Busy structure: stores information related to Ready/Busy detection
 *
 * @type:	the Ready/Busy detection mode
 * @info:	information related to the R/B detection mode. Either a gpio
 *		id or a native R/B id (those supported by the NAND controller).
 */
struct sunxi_nand_rb {
	enum sunxi_nand_rb_type type;
	union {
		int gpio;
		int nativeid;
	} info;
};

/*
 * Chip Select structure: stores information related to NAND Chip Select
 *
 * @cs:		the NAND CS id used to communicate with a NAND Chip
 * @rb:		the Ready/Busy description
 */
struct sunxi_nand_chip_sel {
	u8 cs;
	struct sunxi_nand_rb rb;
};

/*
 * sunxi HW ECC infos: stores information related to HW ECC support
 *
 * @mode:	the sunxi ECC mode field deduced from ECC requirements
 */
struct sunxi_nand_hw_ecc {
	int mode;
};

/*
 * NAND chip structure: stores NAND chip device related information
 *
 * @node:	used to store NAND chips into a list
 * @nand:	base NAND chip structure
 * @mtd:	base MTD structure
 * @clk_rate:	clk_rate required for this NAND chip
 * @timing_cfg:	TIMING_CFG register value for this NAND chip
 * @selected:	current active CS
 * @nsels:	number of CS lines required by the NAND chip
 * @sels:	array of CS lines descriptions
 */
struct sunxi_nand_chip {
	struct list_head node;
	struct nand_chip nand;
	unsigned long clk_rate;
	u32 timing_cfg;
	u32 timing_ctl;
	int selected;
	int addr_cycles;
	u32 addr[2];
	int cmd_cycles;
	u8 cmd[2];
	int nsels;
	struct sunxi_nand_chip_sel sels[0];
};

static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
{
	return container_of(nand, struct sunxi_nand_chip, nand);
}

/*
 * NAND Controller structure: stores sunxi NAND controller information
 *
 * @controller:		base controller structure
 * @dev:		parent device (used to print error messages)
 * @regs:		NAND controller registers
 * @ahb_clk:		NAND Controller AHB clock
 * @mod_clk:		NAND Controller mod clock
 * @assigned_cs:	bitmask describing already assigned CS lines
 * @clk_rate:		NAND controller current clock rate
 * @chips:		a list containing all the NAND chips attached to
 *			this NAND controller
 * @complete:		a completion object used to wait for NAND
 *			controller events
 */
struct sunxi_nfc {
	struct nand_hw_control controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *ahb_clk;
	struct clk *mod_clk;
	struct reset_control *reset;
	unsigned long assigned_cs;
	unsigned long clk_rate;
	struct list_head chips;
	struct completion complete;
	struct dma_chan *dmac;
};

static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
{
	return container_of(ctrl, struct sunxi_nfc, controller);
}

static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
{
	struct sunxi_nfc *nfc = dev_id;
	u32 st = readl(nfc->regs + NFC_REG_ST);
	u32 ien = readl(nfc->regs + NFC_REG_INT);

	if (!(ien & st))
		return IRQ_NONE;

	if ((ien & st) == ien)
		complete(&nfc->complete);

	writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
	writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);

	return IRQ_HANDLED;
}

static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
				 bool use_polling, unsigned int timeout_ms)
{
	int ret;

	if (events & ~NFC_INT_MASK)
		return -EINVAL;

	if (!timeout_ms)
		timeout_ms = NFC_DEFAULT_TIMEOUT_MS;

	if (!use_polling) {
		init_completion(&nfc->complete);

		writel(events, nfc->regs + NFC_REG_INT);

		ret = wait_for_completion_timeout(&nfc->complete,
						  msecs_to_jiffies(timeout_ms));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			ret = 0;

		writel(0, nfc->regs + NFC_REG_INT);
	} else {
		u32 status;

		ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
					 (status & events) == events, 1,
					 timeout_ms * 1000);
	}

	writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);

	if (ret)
		dev_err(nfc->dev, "wait interrupt timedout\n");

	return ret;
}

static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
{
	u32 status;
	int ret;

	ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
				 !(status & NFC_CMD_FIFO_STATUS), 1,
				 NFC_DEFAULT_TIMEOUT_MS * 1000);
	if (ret)
		dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");

	return ret;
}

static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
{
	u32 ctl;
	int ret;

	writel(0, nfc->regs + NFC_REG_ECC_CTL);
	writel(NFC_RESET, nfc->regs + NFC_REG_CTL);

	ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
				 !(ctl & NFC_RESET), 1,
				 NFC_DEFAULT_TIMEOUT_MS * 1000);
	if (ret)
		dev_err(nfc->dev, "wait for NAND controller reset timedout\n");

	return ret;
}

static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
				    int chunksize, int nchunks,
				    enum dma_data_direction ddir,
				    struct scatterlist *sg)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct dma_async_tx_descriptor *dmad;
	enum dma_transfer_direction tdir;
	dma_cookie_t dmat;
	int ret;

	if (ddir == DMA_FROM_DEVICE)
		tdir = DMA_DEV_TO_MEM;
	else
		tdir = DMA_MEM_TO_DEV;

	sg_init_one(sg, buf, nchunks * chunksize);
	ret = dma_map_sg(nfc->dev, sg, 1, ddir);
	if (!ret)
		return -ENOMEM;

	dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
	if (!dmad) {
		ret = -EINVAL;
		goto err_unmap_buf;
	}

	writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);
	writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
	writel(chunksize, nfc->regs + NFC_REG_CNT);
	dmat = dmaengine_submit(dmad);

	ret = dma_submit_error(dmat);
	if (ret)
		goto err_clr_dma_flag;

	return 0;

err_clr_dma_flag:
	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);

err_unmap_buf:
	dma_unmap_sg(nfc->dev, sg, 1, ddir);
	return ret;
}

static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
				     enum dma_data_direction ddir,
				     struct scatterlist *sg)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	dma_unmap_sg(nfc->dev, sg, 1, ddir);
	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);
}

static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	struct sunxi_nand_rb *rb;
	int ret;

	if (sunxi_nand->selected < 0)
		return 0;

	rb = &sunxi_nand->sels[sunxi_nand->selected].rb;

	switch (rb->type) {
	case RB_NATIVE:
		ret = !!(readl(nfc->regs + NFC_REG_ST) &
			 NFC_RB_STATE(rb->info.nativeid));
		break;
	case RB_GPIO:
		ret = gpio_get_value(rb->info.gpio);
		break;
	case RB_NONE:
	default:
		ret = 0;
		dev_err(nfc->dev, "cannot check R/B NAND status!\n");
		break;
	}

	return ret;
}

static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	struct sunxi_nand_chip_sel *sel;
	u32 ctl;

	if (chip > 0 && chip >= sunxi_nand->nsels)
		return;

	if (chip == sunxi_nand->selected)
		return;

	ctl = readl(nfc->regs + NFC_REG_CTL) &
	      ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);

	if (chip >= 0) {
		sel = &sunxi_nand->sels[chip];

		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
		       NFC_PAGE_SHIFT(nand->page_shift);
		if (sel->rb.type == RB_NONE) {
			nand->dev_ready = NULL;
		} else {
			nand->dev_ready = sunxi_nfc_dev_ready;
			if (sel->rb.type == RB_NATIVE)
				ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
		}

		writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);

		if (nfc->clk_rate != sunxi_nand->clk_rate) {
			clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
			nfc->clk_rate = sunxi_nand->clk_rate;
		}
	}

	writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
	writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
	writel(ctl, nfc->regs + NFC_REG_CTL);

	sunxi_nand->selected = chip;
}

static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;
	int cnt;
	int offs = 0;
	u32 tmp;

	while (len > offs) {
		bool poll = false;

		cnt = min(len - offs, NFC_SRAM_SIZE);

		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			break;

		writel(cnt, nfc->regs + NFC_REG_CNT);
		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
		writel(tmp, nfc->regs + NFC_REG_CMD);

		/* Arbitrary limit for polling mode */
		if (cnt < 64)
			poll = true;

		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
		if (ret)
			break;

		if (buf)
			memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
				      cnt);
		offs += cnt;
	}
}

static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;
	int cnt;
	int offs = 0;
	u32 tmp;

	while (len > offs) {
		bool poll = false;

		cnt = min(len - offs, NFC_SRAM_SIZE);

		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			break;

		writel(cnt, nfc->regs + NFC_REG_CNT);
		memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
		      NFC_ACCESS_DIR;
		writel(tmp, nfc->regs + NFC_REG_CMD);

		/* Arbitrary limit for polling mode */
		if (cnt < 64)
			poll = true;

		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
		if (ret)
			break;

		offs += cnt;
	}
}

static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
{
	uint8_t ret;

	sunxi_nfc_read_buf(mtd, &ret, 1);

	return ret;
}

static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
			       unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;

	if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
	    !(ctrl & (NAND_CLE | NAND_ALE))) {
		u32 cmd = 0;

		if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
			return;

		if (sunxi_nand->cmd_cycles--)
			cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];

		if (sunxi_nand->cmd_cycles--) {
			cmd |= NFC_SEND_CMD2;
			writel(sunxi_nand->cmd[1],
			       nfc->regs + NFC_REG_RCMD_SET);
		}

		sunxi_nand->cmd_cycles = 0;

		if (sunxi_nand->addr_cycles) {
			cmd |= NFC_SEND_ADR |
			       NFC_ADR_NUM(sunxi_nand->addr_cycles);
			writel(sunxi_nand->addr[0],
			       nfc->regs + NFC_REG_ADDR_LOW);
		}

		if (sunxi_nand->addr_cycles > 4)
			writel(sunxi_nand->addr[1],
			       nfc->regs + NFC_REG_ADDR_HIGH);

		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			return;

		writel(cmd, nfc->regs + NFC_REG_CMD);
		sunxi_nand->addr[0] = 0;
		sunxi_nand->addr[1] = 0;
		sunxi_nand->addr_cycles = 0;
		sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
	}

	if (ctrl & NAND_CLE) {
		sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
	} else if (ctrl & NAND_ALE) {
		sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
				dat << ((sunxi_nand->addr_cycles % 4) * 8);
		sunxi_nand->addr_cycles++;
	}
}

/* These seed values have been extracted from Allwinner's BSP */
static const u16 sunxi_nfc_randomizer_page_seeds[] = {
	0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
	0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
	0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
	0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
	0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
	0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
	0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
	0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
	0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
	0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
	0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
	0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
	0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
	0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
	0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
	0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
};

/*
 * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
 * have been generated using
 * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
 * the randomizer engine does internally before de/scrambling OOB data.
 *
 * Those tables are statically defined to avoid calculating randomizer state
 * at runtime.
 */
static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
	0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
	0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
	0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
	0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
	0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
	0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
	0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
	0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
	0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
	0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
	0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
	0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
	0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
	0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
	0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
	0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
};

static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
	0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
	0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
	0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
	0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
	0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
	0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
	0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
	0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
	0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
	0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
	0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
	0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
	0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
	0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
	0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
	0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
};

static u16 sunxi_nfc_randomizer_step(u16 state, int count)
{
	state &= 0x7fff;

	/*
	 * This loop is just a simple implementation of a Fibonacci LFSR using
	 * the x16 + x15 + 1 polynomial.
	 */
	while (count--)
		state = ((state >> 1) |
			 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;

	return state;
}

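/*
 * Illustrative sketch (not part of the driver): regenerating the ECC seed
 * tables above from the page seeds, following the rule described in the
 * comment before sunxi_nfc_randomizer_ecc512_seeds[]. The ecc512[] array
 * and step_size variable are hypothetical names used only for this example.
 *
 *	u16 ecc512[ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds)];
 *	int i, step_size = 512;
 *
 *	for (i = 0; i < ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds); i++)
 *		ecc512[i] = sunxi_nfc_randomizer_step(
 *				sunxi_nfc_randomizer_page_seeds[i],
 *				(step_size * 8) + 15);
 */
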
static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
{
	const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
	int mod = mtd_div_by_ws(mtd->erasesize, mtd);

	if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
		mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);

	if (ecc) {
		if (mtd->ecc_step_size == 512)
			seeds = sunxi_nfc_randomizer_ecc512_seeds;
		else
			seeds = sunxi_nfc_randomizer_ecc1024_seeds;
	}

	return seeds[page % mod];
}

static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
					int page, bool ecc)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
	u16 state;

	if (!(nand->options & NAND_NEED_SCRAMBLING))
		return;

	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
	state = sunxi_nfc_randomizer_state(mtd, page, ecc);
	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
	writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	if (!(nand->options & NAND_NEED_SCRAMBLING))
		return;

	writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
	       nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	if (!(nand->options & NAND_NEED_SCRAMBLING))
		return;

	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
	       nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
{
	u16 state = sunxi_nfc_randomizer_state(mtd, page, true);

	bbm[0] ^= state;
	bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
}

static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
					   const uint8_t *buf, int len,
					   bool ecc, int page)
{
	sunxi_nfc_randomizer_config(mtd, page, ecc);
	sunxi_nfc_randomizer_enable(mtd);
	sunxi_nfc_write_buf(mtd, buf, len);
	sunxi_nfc_randomizer_disable(mtd);
}

static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
					  int len, bool ecc, int page)
{
	sunxi_nfc_randomizer_config(mtd, page, ecc);
	sunxi_nfc_randomizer_enable(mtd);
	sunxi_nfc_read_buf(mtd, buf, len);
	sunxi_nfc_randomizer_disable(mtd);
}

static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
	u32 ecc_ctl;

	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
	ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
		     NFC_ECC_BLOCK_SIZE_MSK);
	ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
		   NFC_ECC_PIPELINE;

	if (nand->ecc.size == 512)
		ecc_ctl |= NFC_ECC_BLOCK_512;

	writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}

static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
	       nfc->regs + NFC_REG_ECC_CTL);
}

static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
{
	buf[0] = user_data;
	buf[1] = user_data >> 8;
	buf[2] = user_data >> 16;
	buf[3] = user_data >> 24;
}

static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
{
	return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
}

static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
						int step, bool bbm, int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
				   oob);

	/* De-randomize the Bad Block Marker. */
	if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
		sunxi_nfc_randomize_bbm(mtd, page, oob);
}

static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
						const u8 *oob, int step,
						bool bbm, int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	u8 user_data[4];

	/* Randomize the Bad Block Marker. */
	if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
		memcpy(user_data, oob, sizeof(user_data));
		sunxi_nfc_randomize_bbm(mtd, page, user_data);
		oob = user_data;
	}

	writel(sunxi_nfc_buf_to_user_data(oob),
	       nfc->regs + NFC_REG_USER_DATA(step));
}

static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
					  unsigned int *max_bitflips, int ret)
{
	if (ret < 0) {
		mtd->ecc_stats.failed++;
	} else {
		mtd->ecc_stats.corrected += ret;
		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
	}
}

static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
				    int step, u32 status, bool *erased)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	u32 tmp;

	*erased = false;

	if (status & NFC_ECC_ERR(step))
		return -EBADMSG;

	if (status & NFC_ECC_PAT_FOUND(step)) {
		u8 pattern;

		if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
			pattern = 0x0;
		} else {
			pattern = 0xff;
			*erased = true;
		}

		if (data)
			memset(data, pattern, ecc->size);

		if (oob)
			memset(oob, pattern, ecc->bytes + 4);

		return 0;
	}

	tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));

	return NFC_ECC_ERR_CNT(step, tmp);
}

static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
				       u8 *data, int data_off,
				       u8 *oob, int oob_off,
				       int *cur_off,
				       unsigned int *max_bitflips,
				       bool bbm, bool oob_required, int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int raw_mode = 0;
	bool erased;
	int ret;

	if (*cur_off != data_off)
		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);

	sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);

	if (data_off + ecc->size != oob_off)
		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	sunxi_nfc_randomizer_enable(mtd);
	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
	sunxi_nfc_randomizer_disable(mtd);
	if (ret)
		return ret;

	*cur_off = oob_off + ecc->bytes + 4;

	ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
				       readl(nfc->regs + NFC_REG_ECC_ST),
				       &erased);
	if (erased)
		return 1;

	if (ret < 0) {
		/*
		 * Re-read the data with the randomizer disabled to identify
		 * bitflips in erased pages.
		 */
		if (nand->options & NAND_NEED_SCRAMBLING) {
			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
			nand->read_buf(mtd, data, ecc->size);
		} else {
			memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
				      ecc->size);
		}

		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
		nand->read_buf(mtd, oob, ecc->bytes + 4);

		ret = nand_check_erased_ecc_chunk(data, ecc->size,
						  oob, ecc->bytes + 4,
						  NULL, 0, ecc->strength);
		if (ret >= 0)
			raw_mode = 1;
	} else {
		memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);

		if (oob_required) {
			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
			sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
						      true, page);

			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
							    bbm, page);
		}
	}

	sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);

	return raw_mode;
}

static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
					    u8 *oob, int *cur_off,
					    bool randomize, int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int offset = ((ecc->bytes + 4) * ecc->steps);
	int len = mtd->oobsize - offset;

	if (len <= 0)
		return;

	if (!cur_off || *cur_off != offset)
		nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
			      offset + mtd->writesize, -1);

	if (!randomize)
		sunxi_nfc_read_buf(mtd, oob + offset, len);
	else
		sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
					      false, page);

	if (cur_off)
		*cur_off = mtd->oobsize + mtd->writesize;
}

static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
					    int oob_required, int page,
					    int nchunks)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	bool randomized = nand->options & NAND_NEED_SCRAMBLING;
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	unsigned int max_bitflips = 0;
	int ret, i, raw_mode = 0;
	struct scatterlist sg;
	u32 status;

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
				       DMA_FROM_DEVICE, &sg);
	if (ret)
		return ret;

	sunxi_nfc_hw_ecc_enable(mtd);
	sunxi_nfc_randomizer_config(mtd, page, false);
	sunxi_nfc_randomizer_enable(mtd);

	writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
	       NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);

	dma_async_issue_pending(nfc->dmac);

	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
	if (ret)
		dmaengine_terminate_all(nfc->dmac);

	sunxi_nfc_randomizer_disable(mtd);
	sunxi_nfc_hw_ecc_disable(mtd);

	sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);

	if (ret)
		return ret;

	status = readl(nfc->regs + NFC_REG_ECC_ST);

	for (i = 0; i < nchunks; i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		u8 *data = buf + data_off;
		u8 *oob = nand->oob_poi + oob_off;
		bool erased;

		ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
					       oob_required ? oob : NULL,
					       i, status, &erased);

		/* ECC errors are handled in the second loop. */
		if (ret < 0)
			continue;

		if (oob_required && !erased) {
			/* TODO: use DMA to retrieve OOB */
			nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
				      mtd->writesize + oob_off, -1);
			nand->read_buf(mtd, oob, ecc->bytes + 4);

			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
							    !i, page);
		}

		if (erased)
			raw_mode = 1;

		sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
	}

	if (status & NFC_ECC_ERR_MSK) {
		for (i = 0; i < nchunks; i++) {
			int data_off = i * ecc->size;
			int oob_off = i * (ecc->bytes + 4);
			u8 *data = buf + data_off;
			u8 *oob = nand->oob_poi + oob_off;

			if (!(status & NFC_ECC_ERR(i)))
				continue;

			/*
			 * Re-read the data with the randomizer disabled to
			 * identify bitflips in erased pages.
			 */
			if (randomized) {
				/* TODO: use DMA to read page in raw mode */
				nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
					      data_off, -1);
				nand->read_buf(mtd, data, ecc->size);
			}

			/* TODO: use DMA to retrieve OOB */
			nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
				      mtd->writesize + oob_off, -1);
			nand->read_buf(mtd, oob, ecc->bytes + 4);

			ret = nand_check_erased_ecc_chunk(data, ecc->size,
							  oob, ecc->bytes + 4,
							  NULL, 0,
							  ecc->strength);
			if (ret >= 0)
				raw_mode = 1;

			sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
		}
	}

	if (oob_required)
		sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
						NULL, !raw_mode,
						page);

	return max_bitflips;
}

static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
					const u8 *data, int data_off,
					const u8 *oob, int oob_off,
					int *cur_off, bool bbm,
					int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int ret;

	if (data_off != *cur_off)
		nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);

	sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);

	if (data_off + ecc->size != oob_off)
		nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	sunxi_nfc_randomizer_enable(mtd);
	sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);

	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
	       NFC_ACCESS_DIR | NFC_ECC_OP,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
	sunxi_nfc_randomizer_disable(mtd);
	if (ret)
		return ret;

	*cur_off = oob_off + ecc->bytes + 4;

	return 0;
}

static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
					     u8 *oob, int *cur_off,
					     int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int offset = ((ecc->bytes + 4) * ecc->steps);
	int len = mtd->oobsize - offset;

	if (len <= 0)
		return;

	if (!cur_off || *cur_off != offset)
		nand->cmdfunc(mtd, NAND_CMD_RNDIN,
			      offset + mtd->writesize, -1);

	sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);

	if (cur_off)
		*cur_off = mtd->oobsize + mtd->writesize;
}

static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
				      struct nand_chip *chip, uint8_t *buf,
				      int oob_required, int page)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	int ret, i, cur_off = 0;
	bool raw_mode = false;

	sunxi_nfc_hw_ecc_enable(mtd);

	for (i = 0; i < ecc->steps; i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		u8 *data = buf + data_off;
		u8 *oob = chip->oob_poi + oob_off;

		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
						  oob_off + mtd->writesize,
						  &cur_off, &max_bitflips,
						  !i, oob_required, page);
		if (ret < 0)
			return ret;
		else if (ret)
			raw_mode = true;
	}

	if (oob_required)
		sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
						!raw_mode, page);

	sunxi_nfc_hw_ecc_disable(mtd);

	return max_bitflips;
}

static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
					  struct nand_chip *chip, u8 *buf,
					  int oob_required, int page)
{
	int ret;

	ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
					       chip->ecc.steps);
	if (ret >= 0)
		return ret;

	/* Fallback to PIO mode */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);

	return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
}

static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
					 struct nand_chip *chip,
					 u32 data_offs, u32 readlen,
					 u8 *bufpoi, int page)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i, cur_off = 0;
	unsigned int max_bitflips = 0;

	sunxi_nfc_hw_ecc_enable(mtd);

	for (i = data_offs / ecc->size;
	     i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		u8 *data = bufpoi + data_off;
		u8 *oob = chip->oob_poi + oob_off;

		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
						  oob,
						  oob_off + mtd->writesize,
						  &cur_off, &max_bitflips, !i,
						  false, page);
		if (ret < 0)
			return ret;
	}

	sunxi_nfc_hw_ecc_disable(mtd);

	return max_bitflips;
}

static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
					     struct nand_chip *chip,
					     u32 data_offs, u32 readlen,
					     u8 *buf, int page)
{
	int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
	int ret;

	ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
	if (ret >= 0)
		return ret;

	/* Fallback to PIO mode */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);

	return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
					     buf, page);
}

static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       const uint8_t *buf, int oob_required,
				       int page)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i, cur_off = 0;

	sunxi_nfc_hw_ecc_enable(mtd);

	for (i = 0; i < ecc->steps; i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		const u8 *data = buf + data_off;
		const u8 *oob = chip->oob_poi + oob_off;

		ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
						   oob_off + mtd->writesize,
						   &cur_off, !i, page);
		if (ret)
			return ret;
	}

	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
						 &cur_off, page);

	sunxi_nfc_hw_ecc_disable(mtd);

	return 0;
}

static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
					  struct nand_chip *chip,
					  u32 data_offs, u32 data_len,
					  const u8 *buf, int oob_required,
					  int page)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i, cur_off = 0;

	sunxi_nfc_hw_ecc_enable(mtd);

	for (i = data_offs / ecc->size;
	     i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		const u8 *data = buf + data_off;
		const u8 *oob = chip->oob_poi + oob_off;

		ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
						   oob_off + mtd->writesize,
						   &cur_off, !i, page);
		if (ret)
			return ret;
	}

	sunxi_nfc_hw_ecc_disable(mtd);

	return 0;
}

static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
					   struct nand_chip *chip,
					   const u8 *buf,
					   int oob_required,
					   int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	struct scatterlist sg;
	int ret, i;

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
				       DMA_TO_DEVICE, &sg);
	if (ret)
		goto pio_fallback;

	for (i = 0; i < ecc->steps; i++) {
		const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));

		sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
	}

	sunxi_nfc_hw_ecc_enable(mtd);
	sunxi_nfc_randomizer_config(mtd, page, false);
	sunxi_nfc_randomizer_enable(mtd);

	writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
	       nfc->regs + NFC_REG_RCMD_SET);

	dma_async_issue_pending(nfc->dmac);

	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
	       NFC_DATA_TRANS | NFC_ACCESS_DIR,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
	if (ret)
		dmaengine_terminate_all(nfc->dmac);

	sunxi_nfc_randomizer_disable(mtd);
	sunxi_nfc_hw_ecc_disable(mtd);

	sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);

	if (ret)
		return ret;

	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
		/* TODO: use DMA to transfer extra OOB bytes ? */
		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
						 NULL, page);

	return 0;

pio_fallback:
	return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
}

  1205. static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
  1206. struct nand_chip *chip,
  1207. uint8_t *buf, int oob_required,
  1208. int page)
  1209. {
  1210. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1211. unsigned int max_bitflips = 0;
  1212. int ret, i, cur_off = 0;
  1213. bool raw_mode = false;
  1214. sunxi_nfc_hw_ecc_enable(mtd);
  1215. for (i = 0; i < ecc->steps; i++) {
  1216. int data_off = i * (ecc->size + ecc->bytes + 4);
  1217. int oob_off = data_off + ecc->size;
  1218. u8 *data = buf + (i * ecc->size);
  1219. u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
  1220. ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
  1221. oob_off, &cur_off,
  1222. &max_bitflips, !i,
  1223. oob_required,
  1224. page);
  1225. if (ret < 0)
  1226. return ret;
  1227. else if (ret)
  1228. raw_mode = true;
  1229. }
  1230. if (oob_required)
  1231. sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
  1232. !raw_mode, page);
  1233. sunxi_nfc_hw_ecc_disable(mtd);
  1234. return max_bitflips;
  1235. }
  1236. static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
  1237. struct nand_chip *chip,
  1238. const uint8_t *buf,
  1239. int oob_required, int page)
  1240. {
  1241. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1242. int ret, i, cur_off = 0;
  1243. sunxi_nfc_hw_ecc_enable(mtd);
  1244. for (i = 0; i < ecc->steps; i++) {
  1245. int data_off = i * (ecc->size + ecc->bytes + 4);
  1246. int oob_off = data_off + ecc->size;
  1247. const u8 *data = buf + (i * ecc->size);
  1248. const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
  1249. ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
  1250. oob, oob_off, &cur_off,
  1251. false, page);
  1252. if (ret)
  1253. return ret;
  1254. }
  1255. if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
  1256. sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
  1257. &cur_off, page);
  1258. sunxi_nfc_hw_ecc_disable(mtd);
  1259. return 0;
  1260. }
static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
                                            struct nand_chip *chip,
                                            int page)
{
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

        chip->pagebuf = -1;

        return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
}

static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
                                             struct nand_chip *chip,
                                             int page)
{
        int ret, status;

        chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);

        chip->pagebuf = -1;

        memset(chip->buffers->databuf, 0xff, mtd->writesize);
        ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
        if (ret)
                return ret;

        /* Send command to program the OOB data */
        chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

        status = chip->waitfunc(mtd, chip);

        return status & NAND_STATUS_FAIL ? -EIO : 0;
}

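/*
 * Lookup tables for the tWB and tRHW timing fields: each entry appears to
 * give the maximum number of clock cycles representable by the
 * corresponding field encoding, i.e. the index returned by the lookup
 * helper below.
 */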
static const s32 tWB_lut[] = {6, 12, 16, 20};
static const s32 tRHW_lut[] = {4, 8, 12, 20};

static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
                                     u32 clk_period)
{
        u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
        int i;

        for (i = 0; i < lut_size; i++) {
                if (clk_cycles <= lut[i])
                        return i;
        }

        /* Doesn't fit */
        return -EINVAL;
}

#define sunxi_nand_lookup_timing(l, p, c) \
        _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)

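/*
 * Compute the smallest clock period that satisfies every SDR timing
 * constraint of the chip, derive the tWB/tADL/tWHR/tRHW/tCAD fields from
 * it, and store the resulting timing configuration and clock rate in the
 * per-chip structure for later use.
 */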
static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
                                          const struct nand_data_interface *conf)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
        const struct nand_sdr_timings *timings;
        u32 min_clk_period = 0;
        s32 tWB, tADL, tWHR, tRHW, tCAD;
        long real_clk_rate;

        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
                return -ENOTSUPP;

        /* T1 <=> tCLS */
        if (timings->tCLS_min > min_clk_period)
                min_clk_period = timings->tCLS_min;

        /* T2 <=> tCLH */
        if (timings->tCLH_min > min_clk_period)
                min_clk_period = timings->tCLH_min;

        /* T3 <=> tCS */
        if (timings->tCS_min > min_clk_period)
                min_clk_period = timings->tCS_min;

        /* T4 <=> tCH */
        if (timings->tCH_min > min_clk_period)
                min_clk_period = timings->tCH_min;

        /* T5 <=> tWP */
        if (timings->tWP_min > min_clk_period)
                min_clk_period = timings->tWP_min;

        /* T6 <=> tWH */
        if (timings->tWH_min > min_clk_period)
                min_clk_period = timings->tWH_min;

        /* T7 <=> tALS */
        if (timings->tALS_min > min_clk_period)
                min_clk_period = timings->tALS_min;

        /* T8 <=> tDS */
        if (timings->tDS_min > min_clk_period)
                min_clk_period = timings->tDS_min;

        /* T9 <=> tDH */
        if (timings->tDH_min > min_clk_period)
                min_clk_period = timings->tDH_min;

        /* T10 <=> tRR */
        if (timings->tRR_min > (min_clk_period * 3))
                min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);

        /* T11 <=> tALH */
        if (timings->tALH_min > min_clk_period)
                min_clk_period = timings->tALH_min;

        /* T12 <=> tRP */
        if (timings->tRP_min > min_clk_period)
                min_clk_period = timings->tRP_min;

        /* T13 <=> tREH */
        if (timings->tREH_min > min_clk_period)
                min_clk_period = timings->tREH_min;

        /* T14 <=> tRC */
        if (timings->tRC_min > (min_clk_period * 2))
                min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);

        /* T15 <=> tWC */
        if (timings->tWC_min > (min_clk_period * 2))
                min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);

        /* T16 - T19 + tCAD */
        if (timings->tWB_max > (min_clk_period * 20))
                min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);

        if (timings->tADL_min > (min_clk_period * 32))
                min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);

        if (timings->tWHR_min > (min_clk_period * 32))
                min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);

        if (timings->tRHW_min > (min_clk_period * 20))
                min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);

        tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
                                       min_clk_period);
        if (tWB < 0) {
                dev_err(nfc->dev, "unsupported tWB\n");
                return tWB;
        }

        tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
        if (tADL > 3) {
                dev_err(nfc->dev, "unsupported tADL\n");
                return -EINVAL;
        }

        tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
        if (tWHR > 3) {
                dev_err(nfc->dev, "unsupported tWHR\n");
                return -EINVAL;
        }

        tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
                                        min_clk_period);
        if (tRHW < 0) {
                dev_err(nfc->dev, "unsupported tRHW\n");
                return tRHW;
        }

        if (csline == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;

        /*
         * TODO: according to ONFI specs this value only applies for DDR NAND,
         * but Allwinner seems to set this to 0x7. Mimic them for now.
         */
        tCAD = 0x7;

        /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
        chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);

        /* Convert min_clk_period from picoseconds to nanoseconds */
        min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);

        /*
         * Unlike what is stated in the Allwinner datasheet, the clk_rate
         * should be set to (1 / min_clk_period), and not (2 / min_clk_period).
         * This new formula was verified with a scope and validated by
         * Allwinner engineers.
         */
        chip->clk_rate = NSEC_PER_SEC / min_clk_period;
        real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
        if (real_clk_rate <= 0) {
                dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
                return -EINVAL;
        }

        /*
         * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
         * output cycle timings shall be used if the host drives tRC less than
         * 30 ns.
         */
        min_clk_period = NSEC_PER_SEC / real_clk_rate;
        chip->timing_ctl = ((min_clk_period * 2) < 30) ?
                           NFC_TIMING_CTL_EDO : 0;

        return 0;
}

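/*
 * OOB layout used with the hardware ECC engine: each ECC step owns
 * (ecc->bytes + 4) bytes of OOB, with 4 free bytes followed by the ECC
 * bytes. Section 0 of the free layout only exposes 2 bytes because the
 * first 2 bytes hold the bad block marker.
 */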
static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
                                    struct mtd_oob_region *oobregion)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &nand->ecc;

        if (section >= ecc->steps)
                return -ERANGE;

        oobregion->offset = section * (ecc->bytes + 4) + 4;
        oobregion->length = ecc->bytes;

        return 0;
}

static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
                                     struct mtd_oob_region *oobregion)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &nand->ecc;

        if (section > ecc->steps)
                return -ERANGE;

        /*
         * The first 2 bytes are used for BB markers, hence we
         * only have 2 bytes available in the first user data
         * section.
         */
        if (!section && ecc->mode == NAND_ECC_HW) {
                oobregion->offset = 2;
                oobregion->length = 2;

                return 0;
        }

        oobregion->offset = section * (ecc->bytes + 4);

        if (section < ecc->steps)
                oobregion->length = 4;
        else
                oobregion->length = mtd->oobsize - oobregion->offset;

        return 0;
}

static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
        .ecc = sunxi_nand_ooblayout_ecc,
        .free = sunxi_nand_ooblayout_free,
};

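/*
 * Common setup for both HW ECC modes: pick an ECC strength supported by the
 * engine (optionally maximizing it when NAND_ECC_MAXIMIZE is set), check
 * that the spare area is large enough, and register the OOB layout and OOB
 * accessors.
 */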
static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
                                              struct nand_ecc_ctrl *ecc,
                                              struct device_node *np)
{
        static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        struct sunxi_nand_hw_ecc *data;
        int nsectors;
        int ret;
        int i;

        if (ecc->options & NAND_ECC_MAXIMIZE) {
                int bytes;

                ecc->size = 1024;
                nsectors = mtd->writesize / ecc->size;

                /* Reserve 2 bytes for the BBM */
                bytes = (mtd->oobsize - 2) / nsectors;

                /* 4 non-ECC bytes are added before each ECC bytes section */
                bytes -= 4;

                /* and the number of ECC bytes has to be even */
                if (bytes % 2)
                        bytes--;

                ecc->strength = bytes * 8 / fls(8 * ecc->size);

                for (i = 0; i < ARRAY_SIZE(strengths); i++) {
                        if (strengths[i] > ecc->strength)
                                break;
                }

                if (!i)
                        ecc->strength = 0;
                else
                        ecc->strength = strengths[i - 1];
        }

        if (ecc->size != 512 && ecc->size != 1024)
                return -EINVAL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* Prefer 1k ECC chunks over 512 byte ones */
        if (ecc->size == 512 && mtd->writesize > 512) {
                ecc->size = 1024;
                ecc->strength *= 2;
        }

        /* Add ECC info retrieval from DT */
        for (i = 0; i < ARRAY_SIZE(strengths); i++) {
                if (ecc->strength <= strengths[i])
                        break;
        }

        if (i >= ARRAY_SIZE(strengths)) {
                dev_err(nfc->dev, "unsupported strength\n");
                ret = -ENOTSUPP;
                goto err;
        }

        data->mode = i;

        /* HW ECC always requests ECC bytes for 1024 byte blocks */
        ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);

        /* HW ECC always works with even numbers of ECC bytes */
        ecc->bytes = ALIGN(ecc->bytes, 2);

        nsectors = mtd->writesize / ecc->size;

        if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
                ret = -EINVAL;
                goto err;
        }

        ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob;
        ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob;
        mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
        ecc->priv = data;

        return 0;

err:
        kfree(data);

        return ret;
}

static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
{
        kfree(ecc->priv);
}

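/*
 * Regular HW ECC mode: use the DMA-accelerated page accessors when a DMA
 * channel was obtained at probe time, and fall back to PIO otherwise. Raw
 * OOB accesses still go through the standard NAND core helpers.
 */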
static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
                                       struct nand_ecc_ctrl *ecc,
                                       struct device_node *np)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        int ret;

        ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
        if (ret)
                return ret;

        if (nfc->dmac) {
                ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
                ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
                ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
                nand->options |= NAND_USE_BOUNCE_BUFFER;
        } else {
                ecc->read_page = sunxi_nfc_hw_ecc_read_page;
                ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
                ecc->write_page = sunxi_nfc_hw_ecc_write_page;
        }

        /* TODO: support DMA for raw accesses and subpage write */
        ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
        ecc->read_oob_raw = nand_read_oob_std;
        ecc->write_oob_raw = nand_write_oob_std;

        return 0;
}

static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
                                                struct nand_ecc_ctrl *ecc,
                                                struct device_node *np)
{
        int ret;

        ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
        if (ret)
                return ret;

        ecc->prepad = 4;
        ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
        ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
        ecc->read_oob_raw = nand_read_oob_syndrome;
        ecc->write_oob_raw = nand_write_oob_syndrome;

        return 0;
}

static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
{
        switch (ecc->mode) {
        case NAND_ECC_HW:
        case NAND_ECC_HW_SYNDROME:
                sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
                break;
        case NAND_ECC_NONE:
        default:
                break;
        }
}

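/*
 * Dispatch ECC initialization based on the mode selected in the DT (or the
 * NAND_ECC_HW default), after filling in the step size and strength
 * recommended by the chip (ecc_step_ds/ecc_strength_ds) when none were
 * provided.
 */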
static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
                               struct device_node *np)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        int ret;

        if (!ecc->size) {
                ecc->size = nand->ecc_step_ds;
                ecc->strength = nand->ecc_strength_ds;
        }

        if (!ecc->size || !ecc->strength)
                return -EINVAL;

        switch (ecc->mode) {
        case NAND_ECC_HW:
                ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
                if (ret)
                        return ret;
                break;
        case NAND_ECC_HW_SYNDROME:
                ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
                if (ret)
                        return ret;
                break;
        case NAND_ECC_NONE:
        case NAND_ECC_SOFT:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

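/*
 * Instantiate one NAND chip described by a DT child node: parse the "reg"
 * (chip selects) and "allwinner,rb"/"rb-gpios" (ready/busy) properties,
 * install the low-level hooks, then run the usual nand_scan_ident() /
 * ECC init / nand_scan_tail() / mtd_device_register() sequence.
 */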
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
                                struct device_node *np)
{
        struct sunxi_nand_chip *chip;
        struct mtd_info *mtd;
        struct nand_chip *nand;
        int nsels;
        int ret;
        int i;
        u32 tmp;

        if (!of_get_property(np, "reg", &nsels))
                return -EINVAL;

        nsels /= sizeof(u32);
        if (!nsels) {
                dev_err(dev, "invalid reg property size\n");
                return -EINVAL;
        }

        chip = devm_kzalloc(dev,
                            sizeof(*chip) +
                            (nsels * sizeof(struct sunxi_nand_chip_sel)),
                            GFP_KERNEL);
        if (!chip) {
                dev_err(dev, "could not allocate chip\n");
                return -ENOMEM;
        }

        chip->nsels = nsels;
        chip->selected = -1;

        for (i = 0; i < nsels; i++) {
                ret = of_property_read_u32_index(np, "reg", i, &tmp);
                if (ret) {
                        dev_err(dev, "could not retrieve reg property: %d\n",
                                ret);
                        return ret;
                }

                if (tmp > NFC_MAX_CS) {
                        dev_err(dev,
                                "invalid reg value: %u (max CS = 7)\n",
                                tmp);
                        return -EINVAL;
                }

                if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
                        dev_err(dev, "CS %d already assigned\n", tmp);
                        return -EINVAL;
                }

                chip->sels[i].cs = tmp;

                if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
                    tmp < 2) {
                        chip->sels[i].rb.type = RB_NATIVE;
                        chip->sels[i].rb.info.nativeid = tmp;
                } else {
                        ret = of_get_named_gpio(np, "rb-gpios", i);
                        if (ret >= 0) {
                                tmp = ret;
                                chip->sels[i].rb.type = RB_GPIO;
                                chip->sels[i].rb.info.gpio = tmp;
                                ret = devm_gpio_request(dev, tmp, "nand-rb");
                                if (ret)
                                        return ret;

                                ret = gpio_direction_input(tmp);
                                if (ret)
                                        return ret;
                        } else {
                                chip->sels[i].rb.type = RB_NONE;
                        }
                }
        }

        nand = &chip->nand;
        /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
        nand->chip_delay = 200;
        nand->controller = &nfc->controller;
        /*
         * Set the ECC mode to the default value in case nothing is specified
         * in the DT.
         */
        nand->ecc.mode = NAND_ECC_HW;
        nand_set_flash_node(nand, np);
        nand->select_chip = sunxi_nfc_select_chip;
        nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
        nand->read_buf = sunxi_nfc_read_buf;
        nand->write_buf = sunxi_nfc_write_buf;
        nand->read_byte = sunxi_nfc_read_byte;
        nand->setup_data_interface = sunxi_nfc_setup_data_interface;

        mtd = nand_to_mtd(nand);
        mtd->dev.parent = dev;

        ret = nand_scan_ident(mtd, nsels, NULL);
        if (ret)
                return ret;

        if (nand->bbt_options & NAND_BBT_USE_FLASH)
                nand->bbt_options |= NAND_BBT_NO_OOB;

        if (nand->options & NAND_NEED_SCRAMBLING)
                nand->options |= NAND_NO_SUBPAGE_WRITE;

        nand->options |= NAND_SUBPAGE_READ;

        ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
        if (ret) {
                dev_err(dev, "ECC init failed: %d\n", ret);
                return ret;
        }

        ret = nand_scan_tail(mtd);
        if (ret) {
                dev_err(dev, "nand_scan_tail failed: %d\n", ret);
                return ret;
        }

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(dev, "failed to register mtd device: %d\n", ret);
                nand_release(mtd);
                return ret;
        }

        list_add_tail(&chip->node, &nfc->chips);

        return 0;
}

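/*
 * Walk the controller's DT child nodes and initialize one chip per node;
 * the controller supports at most 8 chips.
 */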
static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
        struct device_node *np = dev->of_node;
        struct device_node *nand_np;
        int nchips = of_get_child_count(np);
        int ret;

        if (nchips > 8) {
                dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
                return -EINVAL;
        }

        for_each_child_of_node(np, nand_np) {
                ret = sunxi_nand_chip_init(dev, nfc, nand_np);
                if (ret) {
                        of_node_put(nand_np);
                        return ret;
                }
        }

        return 0;
}

static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
{
        struct sunxi_nand_chip *chip;

        while (!list_empty(&nfc->chips)) {
                chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
                                        node);
                nand_release(nand_to_mtd(&chip->nand));
                sunxi_nand_ecc_cleanup(&chip->nand.ecc);
                list_del(&chip->node);
        }
}

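/*
 * Probe: map the controller registers, enable the AHB and mod clocks,
 * deassert the optional reset line, reset the controller, install the
 * interrupt handler, and try to grab the "rxtx" DMA channel before scanning
 * for chips. DMA is optional; the driver falls back to PIO when no channel
 * is available.
 */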
static int sunxi_nfc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *r;
        struct sunxi_nfc *nfc;
        int irq;
        int ret;

        nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
        if (!nfc)
                return -ENOMEM;

        nfc->dev = dev;
        nand_hw_control_init(&nfc->controller);
        INIT_LIST_HEAD(&nfc->chips);

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        nfc->regs = devm_ioremap_resource(dev, r);
        if (IS_ERR(nfc->regs))
                return PTR_ERR(nfc->regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "failed to retrieve irq\n");
                return irq;
        }

        nfc->ahb_clk = devm_clk_get(dev, "ahb");
        if (IS_ERR(nfc->ahb_clk)) {
                dev_err(dev, "failed to retrieve ahb clk\n");
                return PTR_ERR(nfc->ahb_clk);
        }

        ret = clk_prepare_enable(nfc->ahb_clk);
        if (ret)
                return ret;

        nfc->mod_clk = devm_clk_get(dev, "mod");
        if (IS_ERR(nfc->mod_clk)) {
                dev_err(dev, "failed to retrieve mod clk\n");
                ret = PTR_ERR(nfc->mod_clk);
                goto out_ahb_clk_unprepare;
        }

        ret = clk_prepare_enable(nfc->mod_clk);
        if (ret)
                goto out_ahb_clk_unprepare;

        nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
        if (IS_ERR(nfc->reset)) {
                ret = PTR_ERR(nfc->reset);
                goto out_mod_clk_unprepare;
        }

        ret = reset_control_deassert(nfc->reset);
        if (ret) {
                dev_err(dev, "reset err %d\n", ret);
                goto out_mod_clk_unprepare;
        }

        ret = sunxi_nfc_rst(nfc);
        if (ret)
                goto out_ahb_reset_reassert;

        writel(0, nfc->regs + NFC_REG_INT);
        ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
                               0, "sunxi-nand", nfc);
        if (ret)
                goto out_ahb_reset_reassert;

        nfc->dmac = dma_request_slave_channel(dev, "rxtx");
        if (nfc->dmac) {
                struct dma_slave_config dmac_cfg = { };

                dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
                dmac_cfg.dst_addr = dmac_cfg.src_addr;
                dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
                dmac_cfg.src_maxburst = 4;
                dmac_cfg.dst_maxburst = 4;
                dmaengine_slave_config(nfc->dmac, &dmac_cfg);
        } else {
                dev_warn(dev, "failed to request rxtx DMA channel\n");
        }

        platform_set_drvdata(pdev, nfc);

        ret = sunxi_nand_chips_init(dev, nfc);
        if (ret) {
                dev_err(dev, "failed to init nand chips\n");
                goto out_release_dmac;
        }

        return 0;

out_release_dmac:
        if (nfc->dmac)
                dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
        reset_control_assert(nfc->reset);
out_mod_clk_unprepare:
        clk_disable_unprepare(nfc->mod_clk);
out_ahb_clk_unprepare:
        clk_disable_unprepare(nfc->ahb_clk);

        return ret;
}

static int sunxi_nfc_remove(struct platform_device *pdev)
{
        struct sunxi_nfc *nfc = platform_get_drvdata(pdev);

        sunxi_nand_chips_cleanup(nfc);
        reset_control_assert(nfc->reset);

        if (nfc->dmac)
                dma_release_channel(nfc->dmac);
        clk_disable_unprepare(nfc->mod_clk);
        clk_disable_unprepare(nfc->ahb_clk);

        return 0;
}

static const struct of_device_id sunxi_nfc_ids[] = {
        { .compatible = "allwinner,sun4i-a10-nand" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);

static struct platform_driver sunxi_nfc_driver = {
        .driver = {
                .name = "sunxi_nand",
                .of_match_table = sunxi_nfc_ids,
        },
        .probe = sunxi_nfc_probe,
        .remove = sunxi_nfc_remove,
};
module_platform_driver(sunxi_nfc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
MODULE_ALIAS("platform:sunxi_nand");