qcom_nandc.c

/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
#define NAND_ADDR0 0x04
#define NAND_ADDR1 0x08
#define NAND_FLASH_CHIP_SELECT 0x0c
#define NAND_EXEC_CMD 0x10
#define NAND_FLASH_STATUS 0x14
#define NAND_BUFFER_STATUS 0x18
#define NAND_DEV0_CFG0 0x20
#define NAND_DEV0_CFG1 0x24
#define NAND_DEV0_ECC_CFG 0x28
#define NAND_DEV1_ECC_CFG 0x2c
#define NAND_DEV1_CFG0 0x30
#define NAND_DEV1_CFG1 0x34
#define NAND_READ_ID 0x40
#define NAND_READ_STATUS 0x44
#define NAND_DEV_CMD0 0xa0
#define NAND_DEV_CMD1 0xa4
#define NAND_DEV_CMD2 0xa8
#define NAND_DEV_CMD_VLD 0xac
#define SFLASHC_BURST_CFG 0xe0
#define NAND_ERASED_CW_DETECT_CFG 0xe8
#define NAND_ERASED_CW_DETECT_STATUS 0xec
#define NAND_EBI2_ECC_BUF_CFG 0xf0
#define FLASH_BUF_ACC 0x100

#define NAND_CTRL 0xf00
#define NAND_VERSION 0xf08
#define NAND_READ_LOCATION_0 0xf20
#define NAND_READ_LOCATION_1 0xf24
#define NAND_READ_LOCATION_2 0xf28
#define NAND_READ_LOCATION_3 0xf2c

/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE 0xdead
#define NAND_DEV_CMD_VLD_RESTORE 0xbeef

/* NAND_FLASH_CMD bits */
#define PAGE_ACC BIT(4)
#define LAST_PAGE BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL 0
#define DM_EN BIT(2)

/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR BIT(4)
#define FS_READY_BSY_N BIT(5)
#define FS_MPU_ERR BIT(8)
#define FS_DEVICE_STS_ERR BIT(16)
#define FS_DEVICE_WP BIT(23)

/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT BIT(8)
#define BS_CORRECTABLE_ERR_MSK 0x1f

/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31
/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27

/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_FORCE_CLK_OPEN 30

/* NAND_DEV_CMD1 bits */
#define READ_ADDR 0

/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD BIT(0)
#define READ_STOP_VLD BIT(1)
#define WRITE_START_VLD BIT(2)
#define ERASE_START_VLD BIT(3)
#define SEQ_READ_START_VLD BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS 0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED BIT(7)
#define CODEWORD_ALL_ERASED BIT(6)
#define PAGE_ERASED BIT(5)
#define CODEWORD_ERASED BIT(4)
#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET 0
#define READ_LOCATION_SIZE 16
#define READ_LOCATION_LAST 31

/* Version Mask */
#define NAND_VERSION_MAJOR_MASK 0xf0000000
#define NAND_VERSION_MAJOR_SHIFT 28
#define NAND_VERSION_MINOR_MASK 0x0fff0000
#define NAND_VERSION_MINOR_SHIFT 16
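
/*
 * Illustrative sketch (not part of the driver): decoding NAND_VERSION
 * with the masks above, assuming the register value was read back into
 * 'nand_version':
 *
 *	major = (nand_version & NAND_VERSION_MAJOR_MASK)
 *			>> NAND_VERSION_MAJOR_SHIFT;
 *	minor = (nand_version & NAND_VERSION_MINOR_MASK)
 *			>> NAND_VERSION_MINOR_SHIFT;
 */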
/* NAND OP_CMDs */
#define PAGE_READ 0x2
#define PAGE_READ_WITH_ECC 0x3
#define PAGE_READ_WITH_ECC_SPARE 0x4
#define PROGRAM_PAGE 0x6
#define PAGE_PROGRAM_WITH_ECC 0x7
#define PROGRAM_PAGE_SPARE 0x9
#define BLOCK_ERASE 0xa
#define FETCH_ID 0xb
#define RESET_DEVICE 0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
			      ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define BAM_MODE_EN BIT(0)
/*
 * the NAND controller performs reads/writes with ECC in 512 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE 512
/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD (3 * MAX_NUM_STEPS)
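
/*
 * Worked numbers for the two bounds above: SZ_8K / NANDC_STEP_SIZE gives
 * 16 codewords for an 8K page, and reading back 3 status registers per
 * codeword gives MAX_REG_RD = 48 register reads per scan.
 */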
/* ECC modes supported by the controller */
#define ECC_NONE BIT(0)
#define ECC_RS_4BIT BIT(1)
#define ECC_BCH_4BIT BIT(2)
#define ECC_BCH_8BIT BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))
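
/*
 * Example usage (this is how update_rw_regs() below programs read
 * location 0 to cover one full ECC-protected codeword and mark it as
 * the last location):
 *
 *	nandc_set_read_loc(nandc, 0, 0, host->cw_data, 1);
 */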
/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
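
/*
 * For example, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1) resolves to
 * dev_cmd_reg_start + 0xa4. The dev_cmd_reg_start value comes from the
 * per-SoC qcom_nandc_props match data, since the NAND_DEV_CMD_* block
 * does not sit at the same offset on every controller generation.
 */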
/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL BIT(2)
/*
 * Erased codeword status is used twice in a single transfer, so this
 * flag determines the current value of the erased codeword status register
 */
#define NAND_ERASED_CW_SET BIT(4)
/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start of the command
 *		   elements for the current sgl. It will be used for size
 *		   calculation of the current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
};
/*
 * This data type corresponds to the nand dma descriptor
 * @node - list node for desc_info
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used
 *	      by ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};
/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};
/*
 * NAND controller data struct
 *
 * @controller:		base controller structure
 * @host_list:		list containing all the chips attached to the
 *			controller
 * @dev:		parent device
 * @base:		MMIO base
 * @base_phys:		physical base address of controller registers
 * @base_dma:		dma base address of controller registers
 * @core_clk:		controller clock
 * @aon_clk:		another controller clock
 *
 * @chan:		dma channel
 * @cmd_crci:		ADM DMA CRCI for command flow control
 * @data_crci:		ADM DMA CRCI for data flow control
 * @desc_list:		DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:	our local DMA buffer for page read/writes,
 *			used when we can't use the buffer provided
 *			by upper layers directly
 * @buf_size/count/start: markers for chip->read_buf/write_buf functions
 * @reg_read_buf:	local buffer for reading back registers via DMA
 * @reg_read_dma:	contains dma address for register read buffer
 * @reg_read_pos:	marker for data read in reg_read_buf
 *
 * @regs:		a contiguous chunk of memory for DMA register
 *			writes. contains the register values to be
 *			written to controller
 * @cmd1/vld:		some fixed controller register values
 * @props:		properties of current NAND controller,
 *			initialized via DT match data
 * @max_cwperpage:	maximum QPIC codewords required. calculated
 *			from the pagesize of all connected NAND devices
 */
struct qcom_nand_controller {
	struct nand_hw_control controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};
/*
 * NAND chip structure
 *
 * @chip:		base NAND chip structure
 * @node:		list node to add itself to host_list in
 *			qcom_nand_controller
 *
 * @cs:			chip select value for this chip
 * @cw_size:		the number of bytes in a single step/codeword
 *			of a page, consisting of all data, ecc, spare
 *			and reserved bytes
 * @cw_data:		the number of bytes within a codeword protected
 *			by ECC
 * @use_ecc:		request the controller to use ECC for the
 *			upcoming read/write
 * @bch_enabled:	flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:	ECC bytes used by controller hardware for this
 *			chip
 * @status:		value to be returned if NAND_CMD_STATUS command
 *			is executed
 * @last_command:	keeps track of last command on this chip. used
 *			for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
 *			ecc/non-ecc mode for the current nand flash
 *			device
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	u32 dev_cmd_reg_start;
};
/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	return bam_txn;
}
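
/*
 * Layout of the single devm allocation made above (sketch):
 *
 *	+--------------------------+ <- bam_txn
 *	| struct bam_transaction   |
 *	+--------------------------+ <- bam_txn->bam_ce
 *	| num_cw * 32 cmd elements |
 *	+--------------------------+ <- bam_txn->cmd_sgl
 *	| num_cw * 32 cmd sgls     |
 *	+--------------------------+ <- bam_txn->data_sgl
 *	| num_cw * 8 data sgls     |
 *	+--------------------------+
 */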
/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);
}
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}
static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}
/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}
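
/*
 * Example: for page 0x12345 and column 0 on an 8-bit chip, the writes
 * above yield ADDR0 = 0x12345 << 16 | 0 = 0x23450000 and
 * ADDR1 = (0x12345 >> 16) & 0xff = 0x01, i.e. the low 16 bits of the
 * page number share ADDR0 with the column.
 */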
/*
 * update_rw_regs: set up read/write register values; these will be
 * written to the NAND controller registers via DMA
 *
 * @num_cw:	number of steps for the read/write operation
 * @read:	read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
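
/*
 * Example: an ECC page read spanning 4 codewords programs cfg0 with
 * (4 - 1) = 3 in the CW_PER_PAGE field, leaves the rest of host->cfg0
 * untouched, and points read location 0 at the host->cw_data bytes of
 * each codeword that are protected by ECC.
 */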
/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);
	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from bam transaction ce array and fills the same with required
 * data. A single SGL can contain multiple command elements so
 * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
 * after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
/*
 * read_reg_dma: prepares a descriptor to read a given number of
 * contiguous registers to the reg_read_buf pointer
 *
 * @first:	offset of the first register in the contiguous block
 * @num_regs:	number of registers to read
 * @flags:	flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
/*
 * write_reg_dma: prepares a descriptor to write a given number of
 * contiguous registers
 *
 * @first:	offset of the first register in the contiguous block
 * @num_regs:	number of registers to write
 * @flags:	flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
/*
 * read_data_dma: prepares a DMA descriptor to transfer data from the
 * controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:	offset within the controller's data buffer
 * @vaddr:	virtual address of the buffer we want to write to
 * @size:	DMA transaction size in bytes
 * @flags:	flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma: prepares a DMA descriptor to transfer data from
 * 'vaddr' to the controller's internal buffer
 *
 * @reg_off:	offset within the controller's data buffer
 * @vaddr:	virtual address of the buffer we want to read from
 * @size:	DMA transaction size in bytes
 * @flags:	flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
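
/*
 * Typical use, as in nandc_param() below: pull one raw 512-byte codeword
 * out of the controller's internal buffer into our local bounce buffer:
 *
 *	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, 512, 0);
 */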
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void config_nand_cw_read(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
		     NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare dma descriptors to configure registers needed for reading a
 * single codeword in page
 */
static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc);
}
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
/*
 * the following functions are used within chip->cmdfunc() to perform different
 * NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);

		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}
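
/*
 * A minimal sketch of how the helpers above are driven (this is the
 * pattern qcom_nandc_command() follows below):
 *
 *	clear_read_regs(nandc);
 *	ret = reset(host);			// queue register writes/reads
 *	if (!ret)
 *		ret = submit_descs(nandc);	// submit and wait
 *	free_descs(nandc);			// unmap and free the desc list
 */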
static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}
/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}
/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased
 * CW, but it notifies that it is an erased CW by placing special characters
 * at certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the
	 * page is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */
	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword markers exist, override them with 0xffs
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't,
	 * then restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}
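/*
 * For example (illustration only): an erased RS codeword reads back as all
 * 0xffs except for a 0x54 marker at offset 3 or offset 175 (the other offset
 * reads 0xff). After the fixup above, memchr_inv() sees a clean all-0xff
 * chunk and the codeword is treated as erased; for any other content, the
 * original bytes at the two special offsets are restored.
 */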
struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* ignore erased codeword errors */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * make sure it isn't an erased page reported
				 * as not-erased by HW because of a few bitflips
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e., filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;
	int ret;

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	clear_bam_transaction(nandc);
	ret = read_page_ecc(host, data_buf, oob_buf);
	if (ret) {
		dev_err(nandc->dev, "failure to read page\n");
		return ret;
	}

	return parse_read_errors(host, data_buf, oob_buf);
}
/* implements ecc->read_page_raw() */
static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;
	int read_loc;

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;

	clear_bam_transaction(nandc);
	update_rw_regs(host, ecc->steps, true);
	config_nand_page_read(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			read_loc = 0;
			nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
			read_loc += data_size1;

			nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
			read_loc += oob_size1;

			nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
			read_loc += data_size2;

			nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
		}

		config_nand_cw_read(nandc);

		read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
		reg_off += data_size1;
		data_buf += data_size1;

		read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
		reg_off += data_size2;
		data_buf += data_size2;

		read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read raw page\n");

	free_descs(nandc);

	return ret;
}
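/*
 * Worked example of the raw split above (illustration only, assuming a 2K
 * page with 4 codewords and 4-bit RS ECC on an 8-bit bus, so cw_size = 528,
 * cw_data = 516, ecc_bytes_hw = 10, spare = 1, bbm = 1):
 *
 *   data_size1 = 2048 - 528 * 3 = 464, oob_size1 = 1
 *   codewords 1..3: data_size2 = 516 - 464 = 52,      oob_size2 = 11
 *   codeword 4:     data_size2 = 512 - 464 - 12 = 36, oob_size2 = 27
 *
 * each codeword transfer is 464 + 1 + 52 + 11 = 528 bytes (or
 * 464 + 1 + 36 + 27 = 528 for the last one), i.e. exactly cw_size.
 */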
/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	ret = read_page_ecc(host, NULL, chip->oob_poi);
	if (ret)
		dev_err(nandc->dev, "failure to read oob\n");

	return ret;
}
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that are written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only oob within a codeword,
 * since ecc is calculated for the combined codeword. we first copy the
 * entire contents of the last codeword (data + oob), replace the old oob
 * with the new one in chip->oob_poi, and then write the entire codeword.
 * this read-copy-write operation results in a slight performance loss.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;
	u32 flash_status;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contains the BBM
	 */
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
/*
 * the three functions below implement chip->read_byte(), chip->read_buf()
 * and chip->write_buf() respectively. these aren't used for reading/writing
 * page data; they are used for smaller transfers like reading the id,
 * status etc.
 */
static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *buf = nandc->data_buffer;
	u8 ret = 0x0;

	if (host->last_command == NAND_CMD_STATUS) {
		ret = host->status;

		host->status = NAND_STATUS_READY | NAND_STATUS_WP;

		return ret;
	}

	if (nandc->buf_start < nandc->buf_count)
		ret = buf[nandc->buf_start++];

	return ret;
}
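/*
 * Example of the status flow above (descriptive only): after an erase, the
 * NAND core issues NAND_CMD_ERASE1, which lands in qcom_nandc_command() and,
 * via post_command()/parse_erase_write_errors(), updates host->status. A
 * subsequent NAND_CMD_STATUS plus read_byte() then returns that privately
 * maintained byte, with NAND_STATUS_FAIL set if the controller reported
 * FS_OP_ERR or FS_DEVICE_STS_ERR.
 */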
static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
	nandc->buf_start += real_len;
}

static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				 int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
	nandc->buf_start += real_len;
}

/* we support only one external chip for now */
static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}
/*
 * NAND controller page layout info
 *
 * Layout with ECC enabled:
 *
 * |----------------------|       |---------------------------------|
 * |           xx.......yy|       |             *********xx.......yy|
 * |    DATA   xx..ECC..yy|       |    DATA     **SPARE**xx..ECC..yy|
 * |   (516)   xx.......yy|       |  (516-n*4)  **(n*4)**xx.......yy|
 * |           xx.......yy|       |             *********xx.......yy|
 * |----------------------|       |---------------------------------|
 *     codeword 1,2..n-1                     codeword n
 *  <---(528/532 Bytes)-->         <-------(528/532 Bytes)--------->
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = Spare/free bytes
 * x = Unused byte(s)
 * y = Reserved byte(s)
 *
 * 2K page: n = 4, spare = 16 bytes
 * 4K page: n = 8, spare = 32 bytes
 * 8K page: n = 16, spare = 64 bytes
 *
 * the qcom nand controller operates at a sub page/codeword level. each
 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
 * the number of ECC bytes varies based on the ECC strength and the bus width.
 *
 * the first n - 1 codewords contain 516 bytes of user data each; the
 * remaining 12/16 bytes consist of ECC and reserved data. The nth codeword
 * contains both user data and spare (oobavail) bytes that sum up to 516
 * bytes.
 *
 * When we access a page with ECC enabled, the reserved byte(s) are not
 * accessible at all. When reading, we fill up these unreadable positions
 * with 0xffs. When writing, the controller skips writing the inaccessible
 * bytes.
 *
 * Layout with ECC disabled:
 *
 * |------------------------------|   |---------------------------------------|
 * |         yy          xx.......|   |         bb          *********xx.......|
 * |  DATA1  yy  DATA2   xx..ECC..|   |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
 * | (size1) yy (size2)  xx.......|   | (size1) bb (size2)  **(n*4)**xx.......|
 * |         yy          xx.......|   |         bb          *********xx.......|
 * |------------------------------|   |---------------------------------------|
 *         codeword 1,2..n-1                         codeword n
 *  <-------(528/532 Bytes)------>     <-----------(528/532 Bytes)----------->
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = Spare/free bytes
 * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
 * b = Real Bad Block byte(s)
 * size1/size2 = function of codeword size and 'n'
 *
 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
 * width) is now accessible. For the first n - 1 codewords, these are dummy
 * Bad Block Markers. In the last codeword, this position contains the real
 * BBM.
 *
 * In order to have a consistent layout between RAW and ECC modes, we assume
 * the following OOB layout arrangement:
 *
 * |-----------|  |--------------------|
 * |yyxx.......|  |bb*********xx.......|
 * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
 * |yyxx.......|  |bb*********xx.......|
 * |yyxx.......|  |bb*********xx.......|
 * |-----------|  |--------------------|
 *  first n - 1       nth OOB region
 *  OOB regions
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = FREE OOB bytes
 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
 * x = Unused byte(s)
 * b = Real bad block byte(s) (inaccessible when ECC enabled)
 *
 * This layout is read as is when ECC is disabled. When ECC is enabled, the
 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
 * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
 * the sum of the three).
 */
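/*
 * A minimal sketch of the codeword split described above, assuming the
 * standard 512-byte step size; this helper is illustrative only and is not
 * called anywhere in the driver. The last codeword donates (n * 4) of its
 * data bytes to the free OOB area, mirroring the arithmetic used in
 * read_page_ecc() and parse_read_errors().
 */
static void __maybe_unused qcom_nandc_example_cw_split(int steps, int cw,
						       int *data_len,
						       int *free_oob_len)
{
	if (cw == steps - 1) {
		/* e.g. a 2K page (n = 4): 512 - 3 * 4 = 500 data bytes... */
		*data_len = NANDC_STEP_SIZE - ((steps - 1) << 2);
		/* ...plus n * 4 = 16 free OOB bytes, totalling 516 */
		*free_oob_len = steps << 2;
	} else {
		/* first n - 1 codewords: 516 data bytes (host->cw_data) */
		*data_len = NANDC_STEP_SIZE + 4;
		*free_oob_len = 0;
	}
}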
static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
				    host->bbm_size;
		oobregion->offset = 0;
	} else {
		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
		oobregion->offset = mtd->oobsize - oobregion->length;
	}

	return 0;
}

static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = ecc->steps * 4;
	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;

	return 0;
}

static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};
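/*
 * Worked example for the two callbacks above (an illustration, not extra
 * driver logic): assume a 2K page with 64 OOB bytes and 4-bit RS ECC on an
 * 8-bit bus, so ecc->steps = 4 and ecc->bytes = 12 (10 ECC + 1 spare +
 * 1 BBM). The reported regions are then:
 *
 *   ECC section 0:  offset 0,            length 3 * 12 + 1 = 37
 *   free section:   offset 37,           length 4 * 4      = 16
 *   ECC section 1:  offset 64 - 11 = 53, length 10 + 1     = 11
 *
 * 37 + 16 + 11 = 64, so the three regions tile the OOB area exactly.
 */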
static int qcom_nand_host_setup(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte;
	bool wide_bus;
	int ecc_mode = 1;

	/*
	 * the controller requires each step to consist of 512 bytes of data.
	 * bail out if DT has populated a wrong step size.
	 */
	if (ecc->size != NANDC_STEP_SIZE) {
		dev_err(nandc->dev, "invalid ecc size\n");
		return -EINVAL;
	}

	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, the controller
		 * uses fewer bytes for ECC. If RS is used, the number of ECC
		 * bytes is always 10.
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
	 * ECC and 12 bytes for 4 bit ECC
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	cwperpage = mtd->writesize / ecc->size;
	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * DATA_UD_BYTES varies based on whether the read/write command protects
	 * spare data with ECC too. We protect spare data by default, so we set
	 * it to main + spare data, which are 512 and 4 bytes respectively.
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
	 * for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;

	if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
		dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
		return -EINVAL;
	}

	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
			| host->cw_data << UD_SIZE_BYTES
			| 0 << DISABLE_STATUS_AFTER_WRITE
			| 5 << NUM_ADDR_CYCLES
			| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
			| 0 << STATUS_BFR_READ
			| 1 << SET_RD_MODE_AFTER_STATUS
			| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
			| 0 << CS_ACTIVE_BSY
			| bad_block_byte << BAD_BLOCK_BYTE_NUM
			| 0 << BAD_BLOCK_IN_SPARE_AREA
			| 2 << WR_RD_BSY_GAP
			| wide_bus << WIDE_FLASH
			| host->bch_enabled << ENABLE_BCH_ECC;

	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
			| host->cw_size << UD_SIZE_BYTES
			| 5 << NUM_ADDR_CYCLES
			| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
			| 0 << CS_ACTIVE_BSY
			| 17 << BAD_BLOCK_BYTE_NUM
			| 1 << BAD_BLOCK_IN_SPARE_AREA
			| 2 << WR_RD_BSY_GAP
			| wide_bus << WIDE_FLASH
			| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
			| 0 << ECC_SW_RESET
			| host->cw_data << ECC_NUM_DATA_BYTES
			| 1 << ECC_FORCE_CLK_OPEN
			| ecc_mode << ECC_MODE
			| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
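/*
 * Worked example for the geometry computed above (illustration only,
 * assuming 4-bit ECC so ecc->bytes = 12): cw_size = 516 + 12 = 528, and on
 * a 2K page (cwperpage = 4) bad_block_byte = 2048 - 528 * 3 + 1 = 465,
 * which is the value programmed into the BAD_BLOCK_BYTE_NUM field of cfg1.
 */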
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
					   MAX_REG_RD,
					   sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM transaction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_hw_control_init(&nandc->controller);

	return 0;
}
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}

/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA */
	if (nandc->props->is_bam) {
		nand_ctrl = nandc_read(nandc, NAND_CTRL);
		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}
static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
			       struct qcom_nand_host *host,
			       struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->cmdfunc = qcom_nandc_command;
	chip->select_chip = qcom_nandc_select_chip;
	chip->read_byte = qcom_nandc_read_byte;
	chip->read_buf = qcom_nandc_read_buf;
	chip->write_buf = qcom_nandc_write_buf;
	chip->set_features = nand_get_set_features_notsupp;
	chip->get_features = nand_get_set_features_notsupp;

	/*
	 * the bad block marker is readable only when we read the last codeword
	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
	 * helpers don't allow us to read BB from a nand chip with ECC
	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
	 * and block_markbad helpers until we permanently switch to using
	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
	 */
	chip->block_bad = qcom_nandc_block_bad;
	chip->block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
			 NAND_SKIP_BBTSCAN;

	/* set up initial status value */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	ret = qcom_nand_host_setup(host);

	return ret;
}

static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
				  struct qcom_nand_host *host,
				  struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(mtd_to_nand(mtd));

	return ret;
}
static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host, *tmp;
	int ret;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init(nandc, host, child);
		if (ret) {
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	if (nandc->props->is_bam) {
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
		ret = qcom_nand_mtd_register(nandc, host, child);
		if (ret) {
			list_del(&host->node);
			devm_kfree(dev, host);
		}
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	return 0;
}
/* parse custom DT properties here */
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	if (!nandc->props->is_bam) {
		ret = of_property_read_u32(np, "qcom,cmd-crci",
					   &nandc->cmd_crci);
		if (ret) {
			dev_err(nandc->dev, "command CRCI unspecified\n");
			return ret;
		}

		ret = of_property_read_u32(np, "qcom,data-crci",
					   &nandc->data_crci);
		if (ret) {
			dev_err(nandc->dev, "data CRCI unspecified\n");
			return ret;
		}
	}

	return 0;
}
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}

static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x7000,
};

/*
 * data will hold a struct pointer containing more differences once we support
 * more controller variants
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");