qcom_nandc.c

  1. /*
  2. * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  3. *
  4. * This software is licensed under the terms of the GNU General Public
  5. * License version 2, as published by the Free Software Foundation, and
  6. * may be copied, distributed, and modified under those terms.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/slab.h>
  15. #include <linux/bitops.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/dmaengine.h>
  18. #include <linux/module.h>
  19. #include <linux/mtd/rawnand.h>
  20. #include <linux/mtd/partitions.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/delay.h>
  24. #include <linux/dma/qcom_bam_dma.h>
  25. /* NANDc reg offsets */
  26. #define NAND_FLASH_CMD 0x00
  27. #define NAND_ADDR0 0x04
  28. #define NAND_ADDR1 0x08
  29. #define NAND_FLASH_CHIP_SELECT 0x0c
  30. #define NAND_EXEC_CMD 0x10
  31. #define NAND_FLASH_STATUS 0x14
  32. #define NAND_BUFFER_STATUS 0x18
  33. #define NAND_DEV0_CFG0 0x20
  34. #define NAND_DEV0_CFG1 0x24
  35. #define NAND_DEV0_ECC_CFG 0x28
  36. #define NAND_DEV1_ECC_CFG 0x2c
  37. #define NAND_DEV1_CFG0 0x30
  38. #define NAND_DEV1_CFG1 0x34
  39. #define NAND_READ_ID 0x40
  40. #define NAND_READ_STATUS 0x44
  41. #define NAND_DEV_CMD0 0xa0
  42. #define NAND_DEV_CMD1 0xa4
  43. #define NAND_DEV_CMD2 0xa8
  44. #define NAND_DEV_CMD_VLD 0xac
  45. #define SFLASHC_BURST_CFG 0xe0
  46. #define NAND_ERASED_CW_DETECT_CFG 0xe8
  47. #define NAND_ERASED_CW_DETECT_STATUS 0xec
  48. #define NAND_EBI2_ECC_BUF_CFG 0xf0
  49. #define FLASH_BUF_ACC 0x100
  50. #define NAND_CTRL 0xf00
  51. #define NAND_VERSION 0xf08
  52. #define NAND_READ_LOCATION_0 0xf20
  53. #define NAND_READ_LOCATION_1 0xf24
  54. #define NAND_READ_LOCATION_2 0xf28
  55. #define NAND_READ_LOCATION_3 0xf2c
  56. /* dummy register offsets, used by write_reg_dma */
  57. #define NAND_DEV_CMD1_RESTORE 0xdead
  58. #define NAND_DEV_CMD_VLD_RESTORE 0xbeef
  59. /* NAND_FLASH_CMD bits */
  60. #define PAGE_ACC BIT(4)
  61. #define LAST_PAGE BIT(5)
  62. /* NAND_FLASH_CHIP_SELECT bits */
  63. #define NAND_DEV_SEL 0
  64. #define DM_EN BIT(2)
  65. /* NAND_FLASH_STATUS bits */
  66. #define FS_OP_ERR BIT(4)
  67. #define FS_READY_BSY_N BIT(5)
  68. #define FS_MPU_ERR BIT(8)
  69. #define FS_DEVICE_STS_ERR BIT(16)
  70. #define FS_DEVICE_WP BIT(23)
  71. /* NAND_BUFFER_STATUS bits */
  72. #define BS_UNCORRECTABLE_BIT BIT(8)
  73. #define BS_CORRECTABLE_ERR_MSK 0x1f
  74. /* NAND_DEVn_CFG0 bits */
  75. #define DISABLE_STATUS_AFTER_WRITE 4
  76. #define CW_PER_PAGE 6
  77. #define UD_SIZE_BYTES 9
  78. #define ECC_PARITY_SIZE_BYTES_RS 19
  79. #define SPARE_SIZE_BYTES 23
  80. #define NUM_ADDR_CYCLES 27
  81. #define STATUS_BFR_READ 30
  82. #define SET_RD_MODE_AFTER_STATUS 31
  83. /* NAND_DEVn_CFG1 bits */
  84. #define DEV0_CFG1_ECC_DISABLE 0
  85. #define WIDE_FLASH 1
  86. #define NAND_RECOVERY_CYCLES 2
  87. #define CS_ACTIVE_BSY 5
  88. #define BAD_BLOCK_BYTE_NUM 6
  89. #define BAD_BLOCK_IN_SPARE_AREA 16
  90. #define WR_RD_BSY_GAP 17
  91. #define ENABLE_BCH_ECC 27
  92. /* NAND_DEV0_ECC_CFG bits */
  93. #define ECC_CFG_ECC_DISABLE 0
  94. #define ECC_SW_RESET 1
  95. #define ECC_MODE 4
  96. #define ECC_PARITY_SIZE_BYTES_BCH 8
  97. #define ECC_NUM_DATA_BYTES 16
  98. #define ECC_FORCE_CLK_OPEN 30
  99. /* NAND_DEV_CMD1 bits */
  100. #define READ_ADDR 0
  101. /* NAND_DEV_CMD_VLD bits */
  102. #define READ_START_VLD BIT(0)
  103. #define READ_STOP_VLD BIT(1)
  104. #define WRITE_START_VLD BIT(2)
  105. #define ERASE_START_VLD BIT(3)
  106. #define SEQ_READ_START_VLD BIT(4)
  107. /* NAND_EBI2_ECC_BUF_CFG bits */
  108. #define NUM_STEPS 0
  109. /* NAND_ERASED_CW_DETECT_CFG bits */
  110. #define ERASED_CW_ECC_MASK 1
  111. #define AUTO_DETECT_RES 0
  112. #define MASK_ECC (1 << ERASED_CW_ECC_MASK)
  113. #define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
  114. #define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
  115. #define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
  116. #define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
  117. /* NAND_ERASED_CW_DETECT_STATUS bits */
  118. #define PAGE_ALL_ERASED BIT(7)
  119. #define CODEWORD_ALL_ERASED BIT(6)
  120. #define PAGE_ERASED BIT(5)
  121. #define CODEWORD_ERASED BIT(4)
  122. #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
  123. #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
  124. /* NAND_READ_LOCATION_n bits */
  125. #define READ_LOCATION_OFFSET 0
  126. #define READ_LOCATION_SIZE 16
  127. #define READ_LOCATION_LAST 31
  128. /* Version Mask */
  129. #define NAND_VERSION_MAJOR_MASK 0xf0000000
  130. #define NAND_VERSION_MAJOR_SHIFT 28
  131. #define NAND_VERSION_MINOR_MASK 0x0fff0000
  132. #define NAND_VERSION_MINOR_SHIFT 16
  133. /* NAND OP_CMDs */
  134. #define OP_PAGE_READ 0x2
  135. #define OP_PAGE_READ_WITH_ECC 0x3
  136. #define OP_PAGE_READ_WITH_ECC_SPARE 0x4
  137. #define OP_PROGRAM_PAGE 0x6
  138. #define OP_PAGE_PROGRAM_WITH_ECC 0x7
  139. #define OP_PROGRAM_PAGE_SPARE 0x9
  140. #define OP_BLOCK_ERASE 0xa
  141. #define OP_FETCH_ID 0xb
  142. #define OP_RESET_DEVICE 0xd
  143. /* Default Value for NAND_DEV_CMD_VLD */
  144. #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
  145. ERASE_START_VLD | SEQ_READ_START_VLD)
  146. /* NAND_CTRL bits */
  147. #define BAM_MODE_EN BIT(0)
  148. /*
  149. * the NAND controller performs reads/writes with ECC in 516 byte chunks.
  150. * the driver calls the chunks 'step' or 'codeword' interchangeably
  151. */
  152. #define NANDC_STEP_SIZE 512
  153. /*
  154. * the largest page size we support is 8K, this will have 16 steps/codewords
  155. * of 512 bytes each
  156. */
  157. #define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
  158. /* we read at most 3 registers per codeword scan */
  159. #define MAX_REG_RD (3 * MAX_NUM_STEPS)
  160. /* ECC modes supported by the controller */
  161. #define ECC_NONE BIT(0)
  162. #define ECC_RS_4BIT BIT(1)
  163. #define ECC_BCH_4BIT BIT(2)
  164. #define ECC_BCH_8BIT BIT(3)
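/* helper to pack offset, size and last-flag into a NAND_READ_LOCATION_n shadow value */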
  165. #define nandc_set_read_loc(nandc, reg, offset, size, is_last) \
  166. nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
  167. ((offset) << READ_LOCATION_OFFSET) | \
  168. ((size) << READ_LOCATION_SIZE) | \
  169. ((is_last) << READ_LOCATION_LAST))
  170. /*
  171. * Returns the actual register address for all NAND_DEV_ registers
  172. * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
  173. */
  174. #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
  175. /* Returns the NAND register physical address */
  176. #define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
  177. /* Returns the dma address for reg read buffer */
  178. #define reg_buf_dma_addr(chip, vaddr) \
  179. ((chip)->reg_read_dma + \
  180. ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
  181. #define QPIC_PER_CW_CMD_ELEMENTS 32
  182. #define QPIC_PER_CW_CMD_SGL 32
  183. #define QPIC_PER_CW_DATA_SGL 8
  184. #define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
  185. /*
  186. * Flags used in DMA descriptor preparation helper functions
  187. * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
  188. */
  189. /* Don't set the EOT in current tx BAM sgl */
  190. #define NAND_BAM_NO_EOT BIT(0)
  191. /* Set the NWD flag in current BAM sgl */
  192. #define NAND_BAM_NWD BIT(1)
  193. /* Finish writing in the current BAM sgl and start writing in another BAM sgl */
  194. #define NAND_BAM_NEXT_SGL BIT(2)
  195. /*
  196. * The NAND_ERASED_CW_DETECT_CFG value is written twice within a single transfer, so
  197. * this flag selects whether the 'set' or 'clear' value is used for the current write
  198. */
  199. #define NAND_ERASED_CW_SET BIT(4)
  200. /*
  201. * This data type corresponds to the BAM transaction which will be used for all
  202. * NAND transfers.
  203. * @bam_ce - the array of BAM command elements
  204. * @cmd_sgl - sgl for NAND BAM command pipe
  205. * @data_sgl - sgl for NAND BAM consumer/producer pipe
  206. * @bam_ce_pos - the index in bam_ce which is available for next sgl
  207. * @bam_ce_start - the index in bam_ce which marks the first command element
  208. * of the current sgl. It will be used for size calculation
  209. * of the current sgl
  210. * @cmd_sgl_pos - current index in command sgl.
  211. * @cmd_sgl_start - start index in command sgl.
  212. * @tx_sgl_pos - current index in data sgl for tx.
  213. * @tx_sgl_start - start index in data sgl for tx.
  214. * @rx_sgl_pos - current index in data sgl for rx.
  215. * @rx_sgl_start - start index in data sgl for rx.
  216. * @wait_second_completion - wait for the second DMA desc completion before
  217. * marking the NAND transfer as complete.
  218. * @txn_done - completion for NAND transfer.
  219. * @last_data_desc - last DMA desc in data channel (tx/rx).
  220. * @last_cmd_desc - last DMA desc in command channel.
  221. */
  222. struct bam_transaction {
  223. struct bam_cmd_element *bam_ce;
  224. struct scatterlist *cmd_sgl;
  225. struct scatterlist *data_sgl;
  226. u32 bam_ce_pos;
  227. u32 bam_ce_start;
  228. u32 cmd_sgl_pos;
  229. u32 cmd_sgl_start;
  230. u32 tx_sgl_pos;
  231. u32 tx_sgl_start;
  232. u32 rx_sgl_pos;
  233. u32 rx_sgl_start;
  234. bool wait_second_completion;
  235. struct completion txn_done;
  236. struct dma_async_tx_descriptor *last_data_desc;
  237. struct dma_async_tx_descriptor *last_cmd_desc;
  238. };
  239. /*
  240. * This data type corresponds to the nand dma descriptor
  241. * @list - list for desc_info
  242. * @dir - DMA transfer direction
  243. * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
  244. * ADM
  245. * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
  246. * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
  247. * @dma_desc - low level DMA engine descriptor
  248. */
  249. struct desc_info {
  250. struct list_head node;
  251. enum dma_data_direction dir;
  252. union {
  253. struct scatterlist adm_sgl;
  254. struct {
  255. struct scatterlist *bam_sgl;
  256. int sgl_cnt;
  257. };
  258. };
  259. struct dma_async_tx_descriptor *dma_desc;
  260. };
  261. /*
  262. * holds the current register values that we want to write. acts as a contiguous
  263. * chunk of memory which we use to write the controller registers through DMA.
  264. */
  265. struct nandc_regs {
  266. __le32 cmd;
  267. __le32 addr0;
  268. __le32 addr1;
  269. __le32 chip_sel;
  270. __le32 exec;
  271. __le32 cfg0;
  272. __le32 cfg1;
  273. __le32 ecc_bch_cfg;
  274. __le32 clrflashstatus;
  275. __le32 clrreadstatus;
  276. __le32 cmd1;
  277. __le32 vld;
  278. __le32 orig_cmd1;
  279. __le32 orig_vld;
  280. __le32 ecc_buf_cfg;
  281. __le32 read_location0;
  282. __le32 read_location1;
  283. __le32 read_location2;
  284. __le32 read_location3;
  285. __le32 erased_cw_detect_cfg_clr;
  286. __le32 erased_cw_detect_cfg_set;
  287. };
  288. /*
  289. * NAND controller data struct
  290. *
  291. * @controller: base controller structure
  292. * @host_list: list containing all the chips attached to the
  293. * controller
  294. * @dev: parent device
  295. * @base: MMIO base
  296. * @base_phys: physical base address of controller registers
  297. * @base_dma: dma base address of controller registers
  298. * @core_clk: controller clock
  299. * @aon_clk: another controller clock
  300. *
  301. * @chan: dma channel
  302. * @cmd_crci: ADM DMA CRCI for command flow control
  303. * @data_crci: ADM DMA CRCI for data flow control
  304. * @desc_list: DMA descriptor list (list of desc_infos)
  305. *
  306. * @data_buffer: our local DMA buffer for page read/writes,
  307. * used when we can't use the buffer provided
  308. * by upper layers directly
  309. * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
  310. * functions
  311. * @reg_read_buf: local buffer for reading back registers via DMA
  312. * @reg_read_dma: contains dma address for register read buffer
  313. * @reg_read_pos: marker for data read in reg_read_buf
  314. *
  315. * @regs: a contiguous chunk of memory for DMA register
  316. * writes. contains the register values to be
  317. * written to controller
  318. * @cmd1/vld: some fixed controller register values
  319. * @props: properties of current NAND controller,
  320. * initialized via DT match data
  321. * @max_cwperpage: maximum QPIC codewords required, calculated
  322. * from the page sizes of all connected NAND devices
  323. */
  324. struct qcom_nand_controller {
  325. struct nand_controller controller;
  326. struct list_head host_list;
  327. struct device *dev;
  328. void __iomem *base;
  329. phys_addr_t base_phys;
  330. dma_addr_t base_dma;
  331. struct clk *core_clk;
  332. struct clk *aon_clk;
  333. union {
  334. /* will be used only by QPIC for BAM DMA */
  335. struct {
  336. struct dma_chan *tx_chan;
  337. struct dma_chan *rx_chan;
  338. struct dma_chan *cmd_chan;
  339. };
  340. /* will be used only by EBI2 for ADM DMA */
  341. struct {
  342. struct dma_chan *chan;
  343. unsigned int cmd_crci;
  344. unsigned int data_crci;
  345. };
  346. };
  347. struct list_head desc_list;
  348. struct bam_transaction *bam_txn;
  349. u8 *data_buffer;
  350. int buf_size;
  351. int buf_count;
  352. int buf_start;
  353. unsigned int max_cwperpage;
  354. __le32 *reg_read_buf;
  355. dma_addr_t reg_read_dma;
  356. int reg_read_pos;
  357. struct nandc_regs *regs;
  358. u32 cmd1, vld;
  359. const struct qcom_nandc_props *props;
  360. };
  361. /*
  362. * NAND chip structure
  363. *
  364. * @chip: base NAND chip structure
  365. * @node: list node to add itself to host_list in
  366. * qcom_nand_controller
  367. *
  368. * @cs: chip select value for this chip
  369. * @cw_size: the number of bytes in a single step/codeword
  370. * of a page, consisting of all data, ecc, spare
  371. * and reserved bytes
  372. * @cw_data: the number of bytes within a codeword protected
  373. * by ECC
  374. * @use_ecc: request the controller to use ECC for the
  375. * upcoming read/write
  376. * @bch_enabled: flag to tell whether BCH ECC mode is used
  377. * @ecc_bytes_hw: ECC bytes used by controller hardware for this
  378. * chip
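 * @spare_bytes: number of spare bytes per codeword
 * @bbm_size: size of the bad block marker, in bytes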
  379. * @status: value to be returned if NAND_CMD_STATUS command
  380. * is executed
  381. * @last_command: keeps track of last command on this chip. used
  382. * for reading correct status
  383. *
  384. * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
  385. * ecc/non-ecc mode for the current nand flash
  386. * device
  387. */
  388. struct qcom_nand_host {
  389. struct nand_chip chip;
  390. struct list_head node;
  391. int cs;
  392. int cw_size;
  393. int cw_data;
  394. bool use_ecc;
  395. bool bch_enabled;
  396. int ecc_bytes_hw;
  397. int spare_bytes;
  398. int bbm_size;
  399. u8 status;
  400. int last_command;
  401. u32 cfg0, cfg1;
  402. u32 cfg0_raw, cfg1_raw;
  403. u32 ecc_buf_cfg;
  404. u32 ecc_bch_cfg;
  405. u32 clrflashstatus;
  406. u32 clrreadstatus;
  407. };
  408. /*
  409. * This data type corresponds to the NAND controller properties which vary
  410. * among different NAND controllers.
  411. * @ecc_modes - ecc mode for NAND
  412. * @is_bam - whether NAND controller is using BAM
  413. * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
  414. */
  415. struct qcom_nandc_props {
  416. u32 ecc_modes;
  417. bool is_bam;
  418. u32 dev_cmd_reg_start;
  419. };
  420. /* Frees the BAM transaction memory */
  421. static void free_bam_transaction(struct qcom_nand_controller *nandc)
  422. {
  423. struct bam_transaction *bam_txn = nandc->bam_txn;
  424. devm_kfree(nandc->dev, bam_txn);
  425. }
  426. /* Allocates and initializes the BAM transaction */
  427. static struct bam_transaction *
  428. alloc_bam_transaction(struct qcom_nand_controller *nandc)
  429. {
  430. struct bam_transaction *bam_txn;
  431. size_t bam_txn_size;
  432. unsigned int num_cw = nandc->max_cwperpage;
  433. void *bam_txn_buf;
  434. bam_txn_size =
  435. sizeof(*bam_txn) + num_cw *
  436. ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
  437. (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
  438. (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
  439. bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
  440. if (!bam_txn_buf)
  441. return NULL;
  442. bam_txn = bam_txn_buf;
  443. bam_txn_buf += sizeof(*bam_txn);
  444. bam_txn->bam_ce = bam_txn_buf;
  445. bam_txn_buf +=
  446. sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
  447. bam_txn->cmd_sgl = bam_txn_buf;
  448. bam_txn_buf +=
  449. sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
  450. bam_txn->data_sgl = bam_txn_buf;
  451. init_completion(&bam_txn->txn_done);
  452. return bam_txn;
  453. }
  454. /* Clears the BAM transaction indexes */
  455. static void clear_bam_transaction(struct qcom_nand_controller *nandc)
  456. {
  457. struct bam_transaction *bam_txn = nandc->bam_txn;
  458. if (!nandc->props->is_bam)
  459. return;
  460. bam_txn->bam_ce_pos = 0;
  461. bam_txn->bam_ce_start = 0;
  462. bam_txn->cmd_sgl_pos = 0;
  463. bam_txn->cmd_sgl_start = 0;
  464. bam_txn->tx_sgl_pos = 0;
  465. bam_txn->tx_sgl_start = 0;
  466. bam_txn->rx_sgl_pos = 0;
  467. bam_txn->rx_sgl_start = 0;
  468. bam_txn->last_data_desc = NULL;
  469. bam_txn->wait_second_completion = false;
  470. sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
  471. QPIC_PER_CW_CMD_SGL);
  472. sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
  473. QPIC_PER_CW_DATA_SGL);
  474. reinit_completion(&bam_txn->txn_done);
  475. }
  476. /* Callback for DMA descriptor completion */
  477. static void qpic_bam_dma_done(void *data)
  478. {
  479. struct bam_transaction *bam_txn = data;
  480. /*
  481. * In case of data transfer with NAND, 2 callbacks will be generated.
  482. * One for command channel and another one for data channel.
  483. * If current transaction has data descriptors
  484. * (i.e. wait_second_completion is true), then set this to false
  485. * and wait for second DMA descriptor completion.
  486. */
  487. if (bam_txn->wait_second_completion)
  488. bam_txn->wait_second_completion = false;
  489. else
  490. complete(&bam_txn->txn_done);
  491. }
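/* helpers to get our host and controller structures from a nand_chip */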
  492. static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
  493. {
  494. return container_of(chip, struct qcom_nand_host, chip);
  495. }
  496. static inline struct qcom_nand_controller *
  497. get_qcom_nand_controller(struct nand_chip *chip)
  498. {
  499. return container_of(chip->controller, struct qcom_nand_controller,
  500. controller);
  501. }
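/* raw MMIO accessors for the controller registers */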
  502. static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
  503. {
  504. return ioread32(nandc->base + offset);
  505. }
  506. static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
  507. u32 val)
  508. {
  509. iowrite32(val, nandc->base + offset);
  510. }
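/*
 * sync the DMA register read buffer (reg_read_buf) for CPU or device access;
 * only needed when the controller uses BAM DMA
 */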
  511. static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
  512. bool is_cpu)
  513. {
  514. if (!nandc->props->is_bam)
  515. return;
  516. if (is_cpu)
  517. dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
  518. MAX_REG_RD *
  519. sizeof(*nandc->reg_read_buf),
  520. DMA_FROM_DEVICE);
  521. else
  522. dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
  523. MAX_REG_RD *
  524. sizeof(*nandc->reg_read_buf),
  525. DMA_FROM_DEVICE);
  526. }
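/* maps a controller register offset to its shadow copy in struct nandc_regs */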
  527. static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
  528. {
  529. switch (offset) {
  530. case NAND_FLASH_CMD:
  531. return &regs->cmd;
  532. case NAND_ADDR0:
  533. return &regs->addr0;
  534. case NAND_ADDR1:
  535. return &regs->addr1;
  536. case NAND_FLASH_CHIP_SELECT:
  537. return &regs->chip_sel;
  538. case NAND_EXEC_CMD:
  539. return &regs->exec;
  540. case NAND_FLASH_STATUS:
  541. return &regs->clrflashstatus;
  542. case NAND_DEV0_CFG0:
  543. return &regs->cfg0;
  544. case NAND_DEV0_CFG1:
  545. return &regs->cfg1;
  546. case NAND_DEV0_ECC_CFG:
  547. return &regs->ecc_bch_cfg;
  548. case NAND_READ_STATUS:
  549. return &regs->clrreadstatus;
  550. case NAND_DEV_CMD1:
  551. return &regs->cmd1;
  552. case NAND_DEV_CMD1_RESTORE:
  553. return &regs->orig_cmd1;
  554. case NAND_DEV_CMD_VLD:
  555. return &regs->vld;
  556. case NAND_DEV_CMD_VLD_RESTORE:
  557. return &regs->orig_vld;
  558. case NAND_EBI2_ECC_BUF_CFG:
  559. return &regs->ecc_buf_cfg;
  560. case NAND_READ_LOCATION_0:
  561. return &regs->read_location0;
  562. case NAND_READ_LOCATION_1:
  563. return &regs->read_location1;
  564. case NAND_READ_LOCATION_2:
  565. return &regs->read_location2;
  566. case NAND_READ_LOCATION_3:
  567. return &regs->read_location3;
  568. default:
  569. return NULL;
  570. }
  571. }
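/* stores a register value in the DMA-able shadow copy that is later written via DMA */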
  572. static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
  573. u32 val)
  574. {
  575. struct nandc_regs *regs = nandc->regs;
  576. __le32 *reg;
  577. reg = offset_to_nandc_reg(regs, offset);
  578. if (reg)
  579. *reg = cpu_to_le32(val);
  580. }
  581. /* helper to configure address register values */
  582. static void set_address(struct qcom_nand_host *host, u16 column, int page)
  583. {
  584. struct nand_chip *chip = &host->chip;
  585. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  586. if (chip->options & NAND_BUSWIDTH_16)
  587. column >>= 1;
  588. nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
  589. nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
  590. }
  591. /*
  592. * update_rw_regs: set up read/write register values, these will be
  593. * written to the NAND controller registers via DMA
  594. *
  595. * @num_cw: number of steps for the read/write operation
  596. * @read: read or write operation
  597. */
  598. static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
  599. {
  600. struct nand_chip *chip = &host->chip;
  601. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  602. u32 cmd, cfg0, cfg1, ecc_bch_cfg;
  603. if (read) {
  604. if (host->use_ecc)
  605. cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
  606. else
  607. cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
  608. } else {
  609. cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
  610. }
  611. if (host->use_ecc) {
  612. cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
  613. (num_cw - 1) << CW_PER_PAGE;
  614. cfg1 = host->cfg1;
  615. ecc_bch_cfg = host->ecc_bch_cfg;
  616. } else {
  617. cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
  618. (num_cw - 1) << CW_PER_PAGE;
  619. cfg1 = host->cfg1_raw;
  620. ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
  621. }
  622. nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
  623. nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
  624. nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
  625. nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
  626. nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
  627. nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
  628. nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
  629. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  630. if (read)
  631. nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
  632. host->cw_data : host->cw_size, 1);
  633. }
  634. /*
  635. * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
  636. * for BAM. This descriptor will be added in the NAND DMA descriptor queue
  637. * which will be submitted to DMA engine.
  638. */
  639. static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
  640. struct dma_chan *chan,
  641. unsigned long flags)
  642. {
  643. struct desc_info *desc;
  644. struct scatterlist *sgl;
  645. unsigned int sgl_cnt;
  646. int ret;
  647. struct bam_transaction *bam_txn = nandc->bam_txn;
  648. enum dma_transfer_direction dir_eng;
  649. struct dma_async_tx_descriptor *dma_desc;
  650. desc = kzalloc(sizeof(*desc), GFP_KERNEL);
  651. if (!desc)
  652. return -ENOMEM;
  653. if (chan == nandc->cmd_chan) {
  654. sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
  655. sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
  656. bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
  657. dir_eng = DMA_MEM_TO_DEV;
  658. desc->dir = DMA_TO_DEVICE;
  659. } else if (chan == nandc->tx_chan) {
  660. sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
  661. sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
  662. bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
  663. dir_eng = DMA_MEM_TO_DEV;
  664. desc->dir = DMA_TO_DEVICE;
  665. } else {
  666. sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
  667. sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
  668. bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
  669. dir_eng = DMA_DEV_TO_MEM;
  670. desc->dir = DMA_FROM_DEVICE;
  671. }
  672. sg_mark_end(sgl + sgl_cnt - 1);
  673. ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
  674. if (ret == 0) {
  675. dev_err(nandc->dev, "failure in mapping desc\n");
  676. kfree(desc);
  677. return -ENOMEM;
  678. }
  679. desc->sgl_cnt = sgl_cnt;
  680. desc->bam_sgl = sgl;
  681. dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
  682. flags);
  683. if (!dma_desc) {
  684. dev_err(nandc->dev, "failure in prep desc\n");
  685. dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
  686. kfree(desc);
  687. return -EINVAL;
  688. }
  689. desc->dma_desc = dma_desc;
  690. /* update last data/command descriptor */
  691. if (chan == nandc->cmd_chan)
  692. bam_txn->last_cmd_desc = dma_desc;
  693. else
  694. bam_txn->last_data_desc = dma_desc;
  695. list_add_tail(&desc->node, &nandc->desc_list);
  696. return 0;
  697. }
  698. /*
  699. * Prepares the command descriptor for BAM DMA which will be used for NAND
  700. * register reads and writes. The command descriptor requires the command
  701. * to be formed as command elements, so this function uses the command
  702. * elements from the bam transaction ce array and fills them with the required
  703. * data. A single SGL can contain multiple command elements, so
  704. * NAND_BAM_NEXT_SGL will be used for starting a separate SGL
  705. * after the current command element.
  706. */
  707. static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
  708. int reg_off, const void *vaddr,
  709. int size, unsigned int flags)
  710. {
  711. int bam_ce_size;
  712. int i, ret;
  713. struct bam_cmd_element *bam_ce_buffer;
  714. struct bam_transaction *bam_txn = nandc->bam_txn;
  715. bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
  716. /* fill the command desc */
  717. for (i = 0; i < size; i++) {
  718. if (read)
  719. bam_prep_ce(&bam_ce_buffer[i],
  720. nandc_reg_phys(nandc, reg_off + 4 * i),
  721. BAM_READ_COMMAND,
  722. reg_buf_dma_addr(nandc,
  723. (__le32 *)vaddr + i));
  724. else
  725. bam_prep_ce_le32(&bam_ce_buffer[i],
  726. nandc_reg_phys(nandc, reg_off + 4 * i),
  727. BAM_WRITE_COMMAND,
  728. *((__le32 *)vaddr + i));
  729. }
  730. bam_txn->bam_ce_pos += size;
  731. /* use the separate sgl after this command */
  732. if (flags & NAND_BAM_NEXT_SGL) {
  733. bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
  734. bam_ce_size = (bam_txn->bam_ce_pos -
  735. bam_txn->bam_ce_start) *
  736. sizeof(struct bam_cmd_element);
  737. sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
  738. bam_ce_buffer, bam_ce_size);
  739. bam_txn->cmd_sgl_pos++;
  740. bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
  741. if (flags & NAND_BAM_NWD) {
  742. ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
  743. DMA_PREP_FENCE |
  744. DMA_PREP_CMD);
  745. if (ret)
  746. return ret;
  747. }
  748. }
  749. return 0;
  750. }
  751. /*
  752. * Prepares the data descriptor for BAM DMA which will be used for NAND
  753. * data reads and writes.
  754. */
  755. static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
  756. const void *vaddr,
  757. int size, unsigned int flags)
  758. {
  759. int ret;
  760. struct bam_transaction *bam_txn = nandc->bam_txn;
  761. if (read) {
  762. sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
  763. vaddr, size);
  764. bam_txn->rx_sgl_pos++;
  765. } else {
  766. sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
  767. vaddr, size);
  768. bam_txn->tx_sgl_pos++;
  769. /*
  770. * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
  771. * is not set, form the DMA descriptor
  772. */
  773. if (!(flags & NAND_BAM_NO_EOT)) {
  774. ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
  775. DMA_PREP_INTERRUPT);
  776. if (ret)
  777. return ret;
  778. }
  779. }
  780. return 0;
  781. }
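/*
 * Prepares a single-sgl DMA descriptor for the ADM DMA engine, used for both
 * register and data transfers on EBI2 based controllers
 */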
  782. static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
  783. int reg_off, const void *vaddr, int size,
  784. bool flow_control)
  785. {
  786. struct desc_info *desc;
  787. struct dma_async_tx_descriptor *dma_desc;
  788. struct scatterlist *sgl;
  789. struct dma_slave_config slave_conf;
  790. enum dma_transfer_direction dir_eng;
  791. int ret;
  792. desc = kzalloc(sizeof(*desc), GFP_KERNEL);
  793. if (!desc)
  794. return -ENOMEM;
  795. sgl = &desc->adm_sgl;
  796. sg_init_one(sgl, vaddr, size);
  797. if (read) {
  798. dir_eng = DMA_DEV_TO_MEM;
  799. desc->dir = DMA_FROM_DEVICE;
  800. } else {
  801. dir_eng = DMA_MEM_TO_DEV;
  802. desc->dir = DMA_TO_DEVICE;
  803. }
  804. ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
  805. if (ret == 0) {
  806. ret = -ENOMEM;
  807. goto err;
  808. }
  809. memset(&slave_conf, 0x00, sizeof(slave_conf));
  810. slave_conf.device_fc = flow_control;
  811. if (read) {
  812. slave_conf.src_maxburst = 16;
  813. slave_conf.src_addr = nandc->base_dma + reg_off;
  814. slave_conf.slave_id = nandc->data_crci;
  815. } else {
  816. slave_conf.dst_maxburst = 16;
  817. slave_conf.dst_addr = nandc->base_dma + reg_off;
  818. slave_conf.slave_id = nandc->cmd_crci;
  819. }
  820. ret = dmaengine_slave_config(nandc->chan, &slave_conf);
  821. if (ret) {
  822. dev_err(nandc->dev, "failed to configure dma channel\n");
  823. goto err;
  824. }
  825. dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
  826. if (!dma_desc) {
  827. dev_err(nandc->dev, "failed to prepare desc\n");
  828. ret = -EINVAL;
  829. goto err;
  830. }
  831. desc->dma_desc = dma_desc;
  832. list_add_tail(&desc->node, &nandc->desc_list);
  833. return 0;
  834. err:
  835. kfree(desc);
  836. return ret;
  837. }
  838. /*
  839. * read_reg_dma: prepares a descriptor to read a given number of
  840. * contiguous registers to the reg_read_buf pointer
  841. *
  842. * @first: offset of the first register in the contiguous block
  843. * @num_regs: number of registers to read
  844. * @flags: flags to control DMA descriptor preparation
  845. */
  846. static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
  847. int num_regs, unsigned int flags)
  848. {
  849. bool flow_control = false;
  850. void *vaddr;
  851. vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
  852. nandc->reg_read_pos += num_regs;
  853. if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
  854. first = dev_cmd_reg_addr(nandc, first);
  855. if (nandc->props->is_bam)
  856. return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
  857. num_regs, flags);
  858. if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
  859. flow_control = true;
  860. return prep_adm_dma_desc(nandc, true, first, vaddr,
  861. num_regs * sizeof(u32), flow_control);
  862. }
  863. /*
  864. * write_reg_dma: prepares a descriptor to write a given number of
  865. * contiguous registers
  866. *
  867. * @first: offset of the first register in the contiguous block
  868. * @num_regs: number of registers to write
  869. * @flags: flags to control DMA descriptor preparation
  870. */
  871. static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
  872. int num_regs, unsigned int flags)
  873. {
  874. bool flow_control = false;
  875. struct nandc_regs *regs = nandc->regs;
  876. void *vaddr;
  877. vaddr = offset_to_nandc_reg(regs, first);
  878. if (first == NAND_ERASED_CW_DETECT_CFG) {
  879. if (flags & NAND_ERASED_CW_SET)
  880. vaddr = &regs->erased_cw_detect_cfg_set;
  881. else
  882. vaddr = &regs->erased_cw_detect_cfg_clr;
  883. }
  884. if (first == NAND_EXEC_CMD)
  885. flags |= NAND_BAM_NWD;
  886. if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
  887. first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
  888. if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
  889. first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
  890. if (nandc->props->is_bam)
  891. return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
  892. num_regs, flags);
  893. if (first == NAND_FLASH_CMD)
  894. flow_control = true;
  895. return prep_adm_dma_desc(nandc, false, first, vaddr,
  896. num_regs * sizeof(u32), flow_control);
  897. }
  898. /*
  899. * read_data_dma: prepares a DMA descriptor to transfer data from the
  900. * controller's internal buffer to the buffer 'vaddr'
  901. *
  902. * @reg_off: offset within the controller's data buffer
  903. * @vaddr: virtual address of the buffer we want to write to
  904. * @size: DMA transaction size in bytes
  905. * @flags: flags to control DMA descriptor preparation
  906. */
  907. static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
  908. const u8 *vaddr, int size, unsigned int flags)
  909. {
  910. if (nandc->props->is_bam)
  911. return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
  912. return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
  913. }
  914. /*
  915. * write_data_dma: prepares a DMA descriptor to transfer data from
  916. * 'vaddr' to the controller's internal buffer
  917. *
  918. * @reg_off: offset within the controller's data buffer
  919. * @vaddr: virtual address of the buffer we want to read from
  920. * @size: DMA transaction size in bytes
  921. * @flags: flags to control DMA descriptor preparation
  922. */
  923. static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
  924. const u8 *vaddr, int size, unsigned int flags)
  925. {
  926. if (nandc->props->is_bam)
  927. return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
  928. return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
  929. }
  930. /*
  931. * Helper to prepare DMA descriptors for configuring registers
  932. * before reading a NAND page.
  933. */
  934. static void config_nand_page_read(struct qcom_nand_controller *nandc)
  935. {
  936. write_reg_dma(nandc, NAND_ADDR0, 2, 0);
  937. write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
  938. write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
  939. write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
  940. write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
  941. NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
  942. }
  943. /*
  944. * Helper to prepare DMA descriptors for configuring registers
  945. * before reading each codeword in NAND page.
  946. */
  947. static void
  948. config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
  949. {
  950. if (nandc->props->is_bam)
  951. write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
  952. NAND_BAM_NEXT_SGL);
  953. write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
  954. write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
  955. if (use_ecc) {
  956. read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
  957. read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
  958. NAND_BAM_NEXT_SGL);
  959. } else {
  960. read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
  961. }
  962. }
  963. /*
  964. * Helper to prepare dma descriptors to configure registers needed for reading a
  965. * single codeword in page
  966. */
  967. static void
  968. config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
  969. bool use_ecc)
  970. {
  971. config_nand_page_read(nandc);
  972. config_nand_cw_read(nandc, use_ecc);
  973. }
  974. /*
  975. * Helper to prepare DMA descriptors used to configure registers needed
  976. * before writing a NAND page.
  977. */
  978. static void config_nand_page_write(struct qcom_nand_controller *nandc)
  979. {
  980. write_reg_dma(nandc, NAND_ADDR0, 2, 0);
  981. write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
  982. write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
  983. NAND_BAM_NEXT_SGL);
  984. }
  985. /*
  986. * Helper to prepare DMA descriptors for configuring registers
  987. * before writing each codeword in NAND page.
  988. */
  989. static void config_nand_cw_write(struct qcom_nand_controller *nandc)
  990. {
  991. write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
  992. write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
  993. read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
  994. write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
  995. write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
  996. }
  997. /*
  998. * the following functions are used within chip->legacy.cmdfunc() to
  999. * perform different NAND_CMD_* commands
  1000. */
  1001. /* sets up descriptors for NAND_CMD_PARAM */
  1002. static int nandc_param(struct qcom_nand_host *host)
  1003. {
  1004. struct nand_chip *chip = &host->chip;
  1005. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1006. /*
  1007. * NAND_CMD_PARAM is called before we know much about the FLASH chip
  1008. * in use. we configure the controller to perform a raw read of 512
  1009. * bytes to read onfi params
  1010. */
  1011. nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
  1012. nandc_set_reg(nandc, NAND_ADDR0, 0);
  1013. nandc_set_reg(nandc, NAND_ADDR1, 0);
  1014. nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
  1015. | 512 << UD_SIZE_BYTES
  1016. | 5 << NUM_ADDR_CYCLES
  1017. | 0 << SPARE_SIZE_BYTES);
  1018. nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
  1019. | 0 << CS_ACTIVE_BSY
  1020. | 17 << BAD_BLOCK_BYTE_NUM
  1021. | 1 << BAD_BLOCK_IN_SPARE_AREA
  1022. | 2 << WR_RD_BSY_GAP
  1023. | 0 << WIDE_FLASH
  1024. | 1 << DEV0_CFG1_ECC_DISABLE);
  1025. nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
  1026. /* configure CMD1 and VLD for ONFI param probing */
  1027. nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
  1028. (nandc->vld & ~READ_START_VLD));
  1029. nandc_set_reg(nandc, NAND_DEV_CMD1,
  1030. (nandc->cmd1 & ~(0xFF << READ_ADDR))
  1031. | NAND_CMD_PARAM << READ_ADDR);
  1032. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  1033. nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
  1034. nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
  1035. nandc_set_read_loc(nandc, 0, 0, 512, 1);
  1036. write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
  1037. write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
  1038. nandc->buf_count = 512;
  1039. memset(nandc->data_buffer, 0xff, nandc->buf_count);
  1040. config_nand_single_cw_page_read(nandc, false);
  1041. read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
  1042. nandc->buf_count, 0);
  1043. /* restore CMD1 and VLD regs */
  1044. write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
  1045. write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
  1046. return 0;
  1047. }
  1048. /* sets up descriptors for NAND_CMD_ERASE1 */
  1049. static int erase_block(struct qcom_nand_host *host, int page_addr)
  1050. {
  1051. struct nand_chip *chip = &host->chip;
  1052. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1053. nandc_set_reg(nandc, NAND_FLASH_CMD,
  1054. OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
  1055. nandc_set_reg(nandc, NAND_ADDR0, page_addr);
  1056. nandc_set_reg(nandc, NAND_ADDR1, 0);
  1057. nandc_set_reg(nandc, NAND_DEV0_CFG0,
  1058. host->cfg0_raw & ~(7 << CW_PER_PAGE));
  1059. nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
  1060. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  1061. nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
  1062. nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
  1063. write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
  1064. write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
  1065. write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
  1066. read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
  1067. write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
  1068. write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
  1069. return 0;
  1070. }
  1071. /* sets up descriptors for NAND_CMD_READID */
  1072. static int read_id(struct qcom_nand_host *host, int column)
  1073. {
  1074. struct nand_chip *chip = &host->chip;
  1075. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1076. if (column == -1)
  1077. return 0;
  1078. nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
  1079. nandc_set_reg(nandc, NAND_ADDR0, column);
  1080. nandc_set_reg(nandc, NAND_ADDR1, 0);
  1081. nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
  1082. nandc->props->is_bam ? 0 : DM_EN);
  1083. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  1084. write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
  1085. write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
  1086. read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
  1087. return 0;
  1088. }
  1089. /* sets up descriptors for NAND_CMD_RESET */
  1090. static int reset(struct qcom_nand_host *host)
  1091. {
  1092. struct nand_chip *chip = &host->chip;
  1093. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1094. nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
  1095. nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
  1096. write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
  1097. write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
  1098. read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
  1099. return 0;
  1100. }
  1101. /* helpers to submit/free our list of dma descriptors */
  1102. static int submit_descs(struct qcom_nand_controller *nandc)
  1103. {
  1104. struct desc_info *desc;
  1105. dma_cookie_t cookie = 0;
  1106. struct bam_transaction *bam_txn = nandc->bam_txn;
  1107. int r;
  1108. if (nandc->props->is_bam) {
  1109. if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
  1110. r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
  1111. if (r)
  1112. return r;
  1113. }
  1114. if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
  1115. r = prepare_bam_async_desc(nandc, nandc->tx_chan,
  1116. DMA_PREP_INTERRUPT);
  1117. if (r)
  1118. return r;
  1119. }
  1120. if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
  1121. r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
  1122. DMA_PREP_CMD);
  1123. if (r)
  1124. return r;
  1125. }
  1126. }
  1127. list_for_each_entry(desc, &nandc->desc_list, node)
  1128. cookie = dmaengine_submit(desc->dma_desc);
  1129. if (nandc->props->is_bam) {
  1130. bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
  1131. bam_txn->last_cmd_desc->callback_param = bam_txn;
  1132. if (bam_txn->last_data_desc) {
  1133. bam_txn->last_data_desc->callback = qpic_bam_dma_done;
  1134. bam_txn->last_data_desc->callback_param = bam_txn;
  1135. bam_txn->wait_second_completion = true;
  1136. }
  1137. dma_async_issue_pending(nandc->tx_chan);
  1138. dma_async_issue_pending(nandc->rx_chan);
  1139. dma_async_issue_pending(nandc->cmd_chan);
  1140. if (!wait_for_completion_timeout(&bam_txn->txn_done,
  1141. QPIC_NAND_COMPLETION_TIMEOUT))
  1142. return -ETIMEDOUT;
  1143. } else {
  1144. if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
  1145. return -ETIMEDOUT;
  1146. }
  1147. return 0;
  1148. }
  1149. static void free_descs(struct qcom_nand_controller *nandc)
  1150. {
  1151. struct desc_info *desc, *n;
  1152. list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
  1153. list_del(&desc->node);
  1154. if (nandc->props->is_bam)
  1155. dma_unmap_sg(nandc->dev, desc->bam_sgl,
  1156. desc->sgl_cnt, desc->dir);
  1157. else
  1158. dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
  1159. desc->dir);
  1160. kfree(desc);
  1161. }
  1162. }
  1163. /* reset the register read buffer for next NAND operation */
  1164. static void clear_read_regs(struct qcom_nand_controller *nandc)
  1165. {
  1166. nandc->reg_read_pos = 0;
  1167. nandc_read_buffer_sync(nandc, false);
  1168. }
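/* resets driver state before a new command is issued via chip->legacy.cmdfunc() */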
  1169. static void pre_command(struct qcom_nand_host *host, int command)
  1170. {
  1171. struct nand_chip *chip = &host->chip;
  1172. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1173. nandc->buf_count = 0;
  1174. nandc->buf_start = 0;
  1175. host->use_ecc = false;
  1176. host->last_command = command;
  1177. clear_read_regs(nandc);
  1178. if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
  1179. command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
  1180. clear_bam_transaction(nandc);
  1181. }
  1182. /*
  1183. * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
  1184. * privately maintained status byte; this status byte can be read after
  1185. * NAND_CMD_STATUS is called
  1186. */
  1187. static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
  1188. {
  1189. struct nand_chip *chip = &host->chip;
  1190. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1191. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1192. int num_cw;
  1193. int i;
  1194. num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
  1195. nandc_read_buffer_sync(nandc, true);
  1196. for (i = 0; i < num_cw; i++) {
  1197. u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
  1198. if (flash_status & FS_MPU_ERR)
  1199. host->status &= ~NAND_STATUS_WP;
  1200. if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
  1201. (flash_status &
  1202. FS_DEVICE_STS_ERR)))
  1203. host->status |= NAND_STATUS_FAIL;
  1204. }
  1205. }
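/* per-command fixups run after the descriptors complete: copy back READID bytes, or parse erase/program status */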
  1206. static void post_command(struct qcom_nand_host *host, int command)
  1207. {
  1208. struct nand_chip *chip = &host->chip;
  1209. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1210. switch (command) {
  1211. case NAND_CMD_READID:
  1212. nandc_read_buffer_sync(nandc, true);
  1213. memcpy(nandc->data_buffer, nandc->reg_read_buf,
  1214. nandc->buf_count);
  1215. break;
  1216. case NAND_CMD_PAGEPROG:
  1217. case NAND_CMD_ERASE1:
  1218. parse_erase_write_errors(host, command);
  1219. break;
  1220. default:
  1221. break;
  1222. }
  1223. }
  1224. /*
  1225. * Implements chip->legacy.cmdfunc. It's only used for a limited set of
  1226. * commands. The rest of the commands wouldn't be called by upper layers.
  1227. * For example, NAND_CMD_READOOB would never be called because we have our own
  1228. * versions of read_oob ops for nand_ecc_ctrl.
  1229. */
  1230. static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
  1231. int column, int page_addr)
  1232. {
  1233. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1234. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1235. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1236. bool wait = false;
  1237. int ret = 0;
  1238. pre_command(host, command);
  1239. switch (command) {
  1240. case NAND_CMD_RESET:
  1241. ret = reset(host);
  1242. wait = true;
  1243. break;
  1244. case NAND_CMD_READID:
  1245. nandc->buf_count = 4;
  1246. ret = read_id(host, column);
  1247. wait = true;
  1248. break;
  1249. case NAND_CMD_PARAM:
  1250. ret = nandc_param(host);
  1251. wait = true;
  1252. break;
  1253. case NAND_CMD_ERASE1:
  1254. ret = erase_block(host, page_addr);
  1255. wait = true;
  1256. break;
  1257. case NAND_CMD_READ0:
  1258. /* we read the entire page for now */
  1259. WARN_ON(column != 0);
  1260. host->use_ecc = true;
  1261. set_address(host, 0, page_addr);
  1262. update_rw_regs(host, ecc->steps, true);
  1263. break;
  1264. case NAND_CMD_SEQIN:
  1265. WARN_ON(column != 0);
  1266. set_address(host, 0, page_addr);
  1267. break;
  1268. case NAND_CMD_PAGEPROG:
  1269. case NAND_CMD_STATUS:
  1270. case NAND_CMD_NONE:
  1271. default:
  1272. break;
  1273. }
  1274. if (ret) {
  1275. dev_err(nandc->dev, "failure executing command %d\n",
  1276. command);
  1277. free_descs(nandc);
  1278. return;
  1279. }
  1280. if (wait) {
  1281. ret = submit_descs(nandc);
  1282. if (ret)
  1283. dev_err(nandc->dev,
  1284. "failure submitting descs for command %d\n",
  1285. command);
  1286. }
  1287. free_descs(nandc);
  1288. post_command(host, command);
  1289. }
/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
 * but it notifies that it is an erased CW by placing special characters at
 * certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
  1301. static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
  1302. {
  1303. u8 empty1, empty2;
/*
 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
 * is erased by looking for 0x54s at offsets 3 and 175 from the
 * beginning of each codeword
 */
empty1 = data_buf[3];
empty2 = data_buf[175];
/*
 * if the erased codeword markers exist, override them with
 * 0xffs
 */
  1315. if ((empty1 == 0x54 && empty2 == 0xff) ||
  1316. (empty1 == 0xff && empty2 == 0x54)) {
  1317. data_buf[3] = 0xff;
  1318. data_buf[175] = 0xff;
  1319. }
  1320. /*
  1321. * check if the entire chunk contains 0xffs or not. if it doesn't, then
  1322. * restore the original values at the special offsets
  1323. */
  1324. if (memchr_inv(data_buf, 0xff, data_len)) {
  1325. data_buf[3] = empty1;
  1326. data_buf[175] = empty2;
  1327. return false;
  1328. }
  1329. return true;
  1330. }
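/*
 * per-codeword status words read back from the controller during a page
 * read; the three fields mirror the FLASH_STATUS, BUFFER_STATUS and
 * ERASED_CW_DETECT_STATUS registers respectively
 */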
  1331. struct read_stats {
  1332. __le32 flash;
  1333. __le32 buffer;
  1334. __le32 erased_cw;
  1335. };
  1336. /* reads back FLASH_STATUS register set by the controller */
  1337. static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
  1338. {
  1339. struct nand_chip *chip = &host->chip;
  1340. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1341. int i;
  1342. for (i = 0; i < cw_cnt; i++) {
  1343. u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
  1344. if (flash & (FS_OP_ERR | FS_MPU_ERR))
  1345. return -EIO;
  1346. }
  1347. return 0;
  1348. }
  1349. /* performs raw read for one codeword */
  1350. static int
  1351. qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
  1352. u8 *data_buf, u8 *oob_buf, int page, int cw)
  1353. {
  1354. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1355. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1356. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1357. int data_size1, data_size2, oob_size1, oob_size2;
  1358. int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
  1359. nand_read_page_op(chip, page, 0, NULL, 0);
  1360. host->use_ecc = false;
  1361. clear_bam_transaction(nandc);
  1362. set_address(host, host->cw_size * cw, page);
  1363. update_rw_regs(host, 1, true);
  1364. config_nand_page_read(nandc);
  1365. data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
  1366. oob_size1 = host->bbm_size;
  1367. if (cw == (ecc->steps - 1)) {
  1368. data_size2 = ecc->size - data_size1 -
  1369. ((ecc->steps - 1) * 4);
  1370. oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
  1371. host->spare_bytes;
  1372. } else {
  1373. data_size2 = host->cw_data - data_size1;
  1374. oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
  1375. }
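/*
 * Worked example (a sketch, assuming a 2K page with 4-bit ECC, i.e. 4 steps
 * of cw_size = 528): data_size1 = 2048 - 528 * 3 = 464. For the last
 * codeword, data_size2 = 512 - 464 - 12 = 36; for the other codewords,
 * data_size2 = 516 - 464 = 52.
 */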
  1376. if (nandc->props->is_bam) {
  1377. nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
  1378. read_loc += data_size1;
  1379. nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
  1380. read_loc += oob_size1;
  1381. nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
  1382. read_loc += data_size2;
  1383. nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
  1384. }
  1385. config_nand_cw_read(nandc, false);
  1386. read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
  1387. reg_off += data_size1;
  1388. read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
  1389. reg_off += oob_size1;
  1390. read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
  1391. reg_off += data_size2;
  1392. read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
  1393. ret = submit_descs(nandc);
  1394. free_descs(nandc);
  1395. if (ret) {
  1396. dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
  1397. return ret;
  1398. }
  1399. return check_flash_errors(host, 1);
  1400. }
/*
 * Bitflips can happen in erased codewords as well, so this function counts
 * the number of 0 bits in each CW for which the ECC engine returns an
 * uncorrectable error. The page is assumed to be erased if this count is
 * less than or equal to ecc->strength for each CW.
 *
 * 1. Both DATA and OOB need to be checked for the number of 0 bits. The
 *    top-level API can be called with only the data buf or only the OOB buf,
 *    so use chip->data_buf if the data buf is NULL and chip->oob_poi if the
 *    oob buf is NULL when copying the raw bytes.
 * 2. Perform a raw read for every CW that has uncorrectable errors.
 * 3. For each CW, check the number of 0 bits in cw_data and the usable OOB
 *    bytes. Bitflips in the BBM and spare bytes don't affect the ECC, so
 *    don't count bitflips in those areas.
 */
  1416. static int
  1417. check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
  1418. u8 *oob_buf, unsigned long uncorrectable_cws,
  1419. int page, unsigned int max_bitflips)
  1420. {
  1421. struct nand_chip *chip = &host->chip;
  1422. struct mtd_info *mtd = nand_to_mtd(chip);
  1423. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1424. u8 *cw_data_buf, *cw_oob_buf;
  1425. int cw, data_size, oob_size, ret = 0;
  1426. if (!data_buf) {
  1427. data_buf = chip->data_buf;
  1428. chip->pagebuf = -1;
  1429. }
  1430. if (!oob_buf) {
  1431. oob_buf = chip->oob_poi;
  1432. chip->pagebuf = -1;
  1433. }
  1434. for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
  1435. if (cw == (ecc->steps - 1)) {
  1436. data_size = ecc->size - ((ecc->steps - 1) * 4);
  1437. oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
  1438. } else {
  1439. data_size = host->cw_data;
  1440. oob_size = host->ecc_bytes_hw;
  1441. }
  1442. /* determine starting buffer address for current CW */
  1443. cw_data_buf = data_buf + (cw * host->cw_data);
  1444. cw_oob_buf = oob_buf + (cw * ecc->bytes);
  1445. ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
  1446. cw_oob_buf, page, cw);
  1447. if (ret)
  1448. return ret;
  1449. /*
  1450. * make sure it isn't an erased page reported
  1451. * as not-erased by HW because of a few bitflips
  1452. */
  1453. ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
  1454. cw_oob_buf + host->bbm_size,
  1455. oob_size, NULL,
  1456. 0, ecc->strength);
  1457. if (ret < 0) {
  1458. mtd->ecc_stats.failed++;
  1459. } else {
  1460. mtd->ecc_stats.corrected += ret;
  1461. max_bitflips = max_t(unsigned int, max_bitflips, ret);
  1462. }
  1463. }
  1464. return max_bitflips;
  1465. }
  1466. /*
  1467. * reads back status registers set by the controller to notify page read
  1468. * errors. this is equivalent to what 'ecc->correct()' would do.
  1469. */
  1470. static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
  1471. u8 *oob_buf, int page)
  1472. {
  1473. struct nand_chip *chip = &host->chip;
  1474. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1475. struct mtd_info *mtd = nand_to_mtd(chip);
  1476. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1477. unsigned int max_bitflips = 0, uncorrectable_cws = 0;
  1478. struct read_stats *buf;
  1479. bool flash_op_err = false, erased;
  1480. int i;
  1481. u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
  1482. buf = (struct read_stats *)nandc->reg_read_buf;
  1483. nandc_read_buffer_sync(nandc, true);
  1484. for (i = 0; i < ecc->steps; i++, buf++) {
  1485. u32 flash, buffer, erased_cw;
  1486. int data_len, oob_len;
  1487. if (i == (ecc->steps - 1)) {
  1488. data_len = ecc->size - ((ecc->steps - 1) << 2);
  1489. oob_len = ecc->steps << 2;
  1490. } else {
  1491. data_len = host->cw_data;
  1492. oob_len = 0;
  1493. }
  1494. flash = le32_to_cpu(buf->flash);
  1495. buffer = le32_to_cpu(buf->buffer);
  1496. erased_cw = le32_to_cpu(buf->erased_cw);
/*
 * Check ECC failure for each codeword. ECC failure can
 * happen in either of the following conditions:
 * 1. The number of bitflips exceeds the ECC engine's
 *    correction capability.
 * 2. The codeword contains all 0xff, in which case the erased
 *    codeword detection check is performed.
 */
  1505. if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
  1506. /*
  1507. * For BCH ECC, ignore erased codeword errors, if
  1508. * ERASED_CW bits are set.
  1509. */
  1510. if (host->bch_enabled) {
  1511. erased = (erased_cw & ERASED_CW) == ERASED_CW ?
  1512. true : false;
  1513. /*
  1514. * For RS ECC, HW reports the erased CW by placing
  1515. * special characters at certain offsets in the buffer.
  1516. * These special characters will be valid only if
  1517. * complete page is read i.e. data_buf is not NULL.
  1518. */
  1519. } else if (data_buf) {
  1520. erased = erased_chunk_check_and_fixup(data_buf,
  1521. data_len);
  1522. } else {
  1523. erased = false;
  1524. }
  1525. if (!erased)
  1526. uncorrectable_cws |= BIT(i);
  1527. /*
  1528. * Check if MPU or any other operational error (timeout,
  1529. * device failure, etc.) happened for this codeword and
  1530. * make flash_op_err true. If flash_op_err is set, then
  1531. * EIO will be returned for page read.
  1532. */
  1533. } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
  1534. flash_op_err = true;
  1535. /*
  1536. * No ECC or operational errors happened. Check the number of
  1537. * bits corrected and update the ecc_stats.corrected.
  1538. */
  1539. } else {
  1540. unsigned int stat;
  1541. stat = buffer & BS_CORRECTABLE_ERR_MSK;
  1542. mtd->ecc_stats.corrected += stat;
  1543. max_bitflips = max(max_bitflips, stat);
  1544. }
  1545. if (data_buf)
  1546. data_buf += data_len;
  1547. if (oob_buf)
  1548. oob_buf += oob_len + ecc->bytes;
  1549. }
  1550. if (flash_op_err)
  1551. return -EIO;
  1552. if (!uncorrectable_cws)
  1553. return max_bitflips;
  1554. return check_for_erased_page(host, data_buf_start, oob_buf_start,
  1555. uncorrectable_cws, page,
  1556. max_bitflips);
  1557. }
  1558. /*
  1559. * helper to perform the actual page read operation, used by ecc->read_page(),
  1560. * ecc->read_oob()
  1561. */
  1562. static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
  1563. u8 *oob_buf, int page)
  1564. {
  1565. struct nand_chip *chip = &host->chip;
  1566. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1567. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1568. u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
  1569. int i, ret;
  1570. config_nand_page_read(nandc);
  1571. /* queue cmd descs for each codeword */
  1572. for (i = 0; i < ecc->steps; i++) {
  1573. int data_size, oob_size;
  1574. if (i == (ecc->steps - 1)) {
  1575. data_size = ecc->size - ((ecc->steps - 1) << 2);
  1576. oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
  1577. host->spare_bytes;
  1578. } else {
  1579. data_size = host->cw_data;
  1580. oob_size = host->ecc_bytes_hw + host->spare_bytes;
  1581. }
  1582. if (nandc->props->is_bam) {
  1583. if (data_buf && oob_buf) {
  1584. nandc_set_read_loc(nandc, 0, 0, data_size, 0);
  1585. nandc_set_read_loc(nandc, 1, data_size,
  1586. oob_size, 1);
  1587. } else if (data_buf) {
  1588. nandc_set_read_loc(nandc, 0, 0, data_size, 1);
  1589. } else {
  1590. nandc_set_read_loc(nandc, 0, data_size,
  1591. oob_size, 1);
  1592. }
  1593. }
  1594. config_nand_cw_read(nandc, true);
  1595. if (data_buf)
  1596. read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
  1597. data_size, 0);
/*
 * when ecc is enabled, the controller doesn't read the real
 * or dummy bad block markers in each chunk. To maintain a
 * consistent layout across RAW and ECC reads, we just
 * leave the real/dummy BBM offsets empty (i.e., filled with
 * 0xffs)
 */
  1605. if (oob_buf) {
  1606. int j;
  1607. for (j = 0; j < host->bbm_size; j++)
  1608. *oob_buf++ = 0xff;
  1609. read_data_dma(nandc, FLASH_BUF_ACC + data_size,
  1610. oob_buf, oob_size, 0);
  1611. }
  1612. if (data_buf)
  1613. data_buf += data_size;
  1614. if (oob_buf)
  1615. oob_buf += oob_size;
  1616. }
  1617. ret = submit_descs(nandc);
  1618. free_descs(nandc);
  1619. if (ret) {
  1620. dev_err(nandc->dev, "failure to read page/oob\n");
  1621. return ret;
  1622. }
  1623. return parse_read_errors(host, data_buf_start, oob_buf_start, page);
  1624. }
  1625. /*
  1626. * a helper that copies the last step/codeword of a page (containing free oob)
  1627. * into our local buffer
  1628. */
  1629. static int copy_last_cw(struct qcom_nand_host *host, int page)
  1630. {
  1631. struct nand_chip *chip = &host->chip;
  1632. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1633. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1634. int size;
  1635. int ret;
  1636. clear_read_regs(nandc);
  1637. size = host->use_ecc ? host->cw_data : host->cw_size;
  1638. /* prepare a clean read buffer */
  1639. memset(nandc->data_buffer, 0xff, size);
  1640. set_address(host, host->cw_size * (ecc->steps - 1), page);
  1641. update_rw_regs(host, 1, true);
  1642. config_nand_single_cw_page_read(nandc, host->use_ecc);
  1643. read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
  1644. ret = submit_descs(nandc);
  1645. if (ret)
  1646. dev_err(nandc->dev, "failed to copy last codeword\n");
  1647. free_descs(nandc);
  1648. return ret;
  1649. }
  1650. /* implements ecc->read_page() */
  1651. static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
  1652. int oob_required, int page)
  1653. {
  1654. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1655. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1656. u8 *data_buf, *oob_buf = NULL;
  1657. nand_read_page_op(chip, page, 0, NULL, 0);
  1658. data_buf = buf;
  1659. oob_buf = oob_required ? chip->oob_poi : NULL;
  1660. clear_bam_transaction(nandc);
  1661. return read_page_ecc(host, data_buf, oob_buf, page);
  1662. }
  1663. /* implements ecc->read_page_raw() */
  1664. static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
  1665. int oob_required, int page)
  1666. {
  1667. struct mtd_info *mtd = nand_to_mtd(chip);
  1668. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1669. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1670. int cw, ret;
  1671. u8 *data_buf = buf, *oob_buf = chip->oob_poi;
  1672. for (cw = 0; cw < ecc->steps; cw++) {
  1673. ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
  1674. page, cw);
  1675. if (ret)
  1676. return ret;
  1677. data_buf += host->cw_data;
  1678. oob_buf += ecc->bytes;
  1679. }
  1680. return 0;
  1681. }
  1682. /* implements ecc->read_oob() */
  1683. static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
  1684. {
  1685. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1686. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1687. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1688. clear_read_regs(nandc);
  1689. clear_bam_transaction(nandc);
  1690. host->use_ecc = true;
  1691. set_address(host, 0, page);
  1692. update_rw_regs(host, ecc->steps, true);
  1693. return read_page_ecc(host, NULL, chip->oob_poi, page);
  1694. }
  1695. /* implements ecc->write_page() */
  1696. static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
  1697. int oob_required, int page)
  1698. {
  1699. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1700. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1701. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1702. u8 *data_buf, *oob_buf;
  1703. int i, ret;
  1704. nand_prog_page_begin_op(chip, page, 0, NULL, 0);
  1705. clear_read_regs(nandc);
  1706. clear_bam_transaction(nandc);
  1707. data_buf = (u8 *)buf;
  1708. oob_buf = chip->oob_poi;
  1709. host->use_ecc = true;
  1710. update_rw_regs(host, ecc->steps, false);
  1711. config_nand_page_write(nandc);
  1712. for (i = 0; i < ecc->steps; i++) {
  1713. int data_size, oob_size;
  1714. if (i == (ecc->steps - 1)) {
  1715. data_size = ecc->size - ((ecc->steps - 1) << 2);
  1716. oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
  1717. host->spare_bytes;
  1718. } else {
  1719. data_size = host->cw_data;
  1720. oob_size = ecc->bytes;
  1721. }
  1722. write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
  1723. i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
  1724. /*
  1725. * when ECC is enabled, we don't really need to write anything
  1726. * to oob for the first n - 1 codewords since these oob regions
  1727. * just contain ECC bytes that's written by the controller
  1728. * itself. For the last codeword, we skip the bbm positions and
  1729. * write to the free oob area.
  1730. */
  1731. if (i == (ecc->steps - 1)) {
  1732. oob_buf += host->bbm_size;
  1733. write_data_dma(nandc, FLASH_BUF_ACC + data_size,
  1734. oob_buf, oob_size, 0);
  1735. }
  1736. config_nand_cw_write(nandc);
  1737. data_buf += data_size;
  1738. oob_buf += oob_size;
  1739. }
  1740. ret = submit_descs(nandc);
  1741. if (ret)
  1742. dev_err(nandc->dev, "failure to write page\n");
  1743. free_descs(nandc);
  1744. if (!ret)
  1745. ret = nand_prog_page_end_op(chip);
  1746. return ret;
  1747. }
  1748. /* implements ecc->write_page_raw() */
  1749. static int qcom_nandc_write_page_raw(struct nand_chip *chip,
  1750. const uint8_t *buf, int oob_required,
  1751. int page)
  1752. {
  1753. struct mtd_info *mtd = nand_to_mtd(chip);
  1754. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1755. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1756. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1757. u8 *data_buf, *oob_buf;
  1758. int i, ret;
  1759. nand_prog_page_begin_op(chip, page, 0, NULL, 0);
  1760. clear_read_regs(nandc);
  1761. clear_bam_transaction(nandc);
  1762. data_buf = (u8 *)buf;
  1763. oob_buf = chip->oob_poi;
  1764. host->use_ecc = false;
  1765. update_rw_regs(host, ecc->steps, false);
  1766. config_nand_page_write(nandc);
  1767. for (i = 0; i < ecc->steps; i++) {
  1768. int data_size1, data_size2, oob_size1, oob_size2;
  1769. int reg_off = FLASH_BUF_ACC;
  1770. data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
  1771. oob_size1 = host->bbm_size;
  1772. if (i == (ecc->steps - 1)) {
  1773. data_size2 = ecc->size - data_size1 -
  1774. ((ecc->steps - 1) << 2);
  1775. oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
  1776. host->spare_bytes;
  1777. } else {
  1778. data_size2 = host->cw_data - data_size1;
  1779. oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
  1780. }
  1781. write_data_dma(nandc, reg_off, data_buf, data_size1,
  1782. NAND_BAM_NO_EOT);
  1783. reg_off += data_size1;
  1784. data_buf += data_size1;
  1785. write_data_dma(nandc, reg_off, oob_buf, oob_size1,
  1786. NAND_BAM_NO_EOT);
  1787. reg_off += oob_size1;
  1788. oob_buf += oob_size1;
  1789. write_data_dma(nandc, reg_off, data_buf, data_size2,
  1790. NAND_BAM_NO_EOT);
  1791. reg_off += data_size2;
  1792. data_buf += data_size2;
  1793. write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
  1794. oob_buf += oob_size2;
  1795. config_nand_cw_write(nandc);
  1796. }
  1797. ret = submit_descs(nandc);
  1798. if (ret)
  1799. dev_err(nandc->dev, "failure to write raw page\n");
  1800. free_descs(nandc);
  1801. if (!ret)
  1802. ret = nand_prog_page_end_op(chip);
  1803. return ret;
  1804. }
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only OOB within a codeword
 * since ECC is calculated for the combined codeword. So update the OOB from
 * chip->oob_poi, and pad the data area with 0xff before writing.
 */
  1812. static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
  1813. {
  1814. struct mtd_info *mtd = nand_to_mtd(chip);
  1815. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1816. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1817. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1818. u8 *oob = chip->oob_poi;
  1819. int data_size, oob_size;
  1820. int ret;
  1821. host->use_ecc = true;
  1822. clear_bam_transaction(nandc);
  1823. /* calculate the data and oob size for the last codeword/step */
  1824. data_size = ecc->size - ((ecc->steps - 1) << 2);
  1825. oob_size = mtd->oobavail;
  1826. memset(nandc->data_buffer, 0xff, host->cw_data);
  1827. /* override new oob content to last codeword */
  1828. mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
  1829. 0, mtd->oobavail);
  1830. set_address(host, host->cw_size * (ecc->steps - 1), page);
  1831. update_rw_regs(host, 1, false);
  1832. config_nand_page_write(nandc);
  1833. write_data_dma(nandc, FLASH_BUF_ACC,
  1834. nandc->data_buffer, data_size + oob_size, 0);
  1835. config_nand_cw_write(nandc);
  1836. ret = submit_descs(nandc);
  1837. free_descs(nandc);
  1838. if (ret) {
  1839. dev_err(nandc->dev, "failure to write oob\n");
  1840. return -EIO;
  1841. }
  1842. return nand_prog_page_end_op(chip);
  1843. }
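/*
 * implements chip->legacy.block_bad(): read the last codeword of the page at
 * @ofs with ECC disabled and test the BBM byte(s) against 0xff
 */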
  1844. static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
  1845. {
  1846. struct mtd_info *mtd = nand_to_mtd(chip);
  1847. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1848. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1849. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1850. int page, ret, bbpos, bad = 0;
  1851. page = (int)(ofs >> chip->page_shift) & chip->pagemask;
/*
 * configure registers for a raw sub-page read. the address is set to
 * the beginning of the last codeword; we don't care about reading the
 * ecc portion of oob, we just want the first few bytes from this
 * codeword that contain the BBM
 */
  1858. host->use_ecc = false;
  1859. clear_bam_transaction(nandc);
  1860. ret = copy_last_cw(host, page);
  1861. if (ret)
  1862. goto err;
  1863. if (check_flash_errors(host, 1)) {
  1864. dev_warn(nandc->dev, "error when trying to read BBM\n");
  1865. goto err;
  1866. }
  1867. bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
  1868. bad = nandc->data_buffer[bbpos] != 0xff;
  1869. if (chip->options & NAND_BUSWIDTH_16)
  1870. bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
  1871. err:
  1872. return bad;
  1873. }
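/*
 * implements chip->legacy.block_markbad(): write 0s over the last codeword
 * (raw) so that the BBM byte(s) read back as bad
 */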
  1874. static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
  1875. {
  1876. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1877. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1878. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1879. int page, ret;
  1880. clear_read_regs(nandc);
  1881. clear_bam_transaction(nandc);
  1882. /*
  1883. * to mark the BBM as bad, we flash the entire last codeword with 0s.
  1884. * we don't care about the rest of the content in the codeword since
  1885. * we aren't going to use this block again
  1886. */
  1887. memset(nandc->data_buffer, 0x00, host->cw_size);
  1888. page = (int)(ofs >> chip->page_shift) & chip->pagemask;
  1889. /* prepare write */
  1890. host->use_ecc = false;
  1891. set_address(host, host->cw_size * (ecc->steps - 1), page);
  1892. update_rw_regs(host, 1, false);
  1893. config_nand_page_write(nandc);
  1894. write_data_dma(nandc, FLASH_BUF_ACC,
  1895. nandc->data_buffer, host->cw_size, 0);
  1896. config_nand_cw_write(nandc);
  1897. ret = submit_descs(nandc);
  1898. free_descs(nandc);
  1899. if (ret) {
  1900. dev_err(nandc->dev, "failure to update BBM\n");
  1901. return -EIO;
  1902. }
  1903. return nand_prog_page_end_op(chip);
  1904. }
  1905. /*
  1906. * the three functions below implement chip->legacy.read_byte(),
  1907. * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
  1908. * aren't used for reading/writing page data, they are used for smaller data
  1909. * like reading id, status etc
  1910. */
  1911. static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
  1912. {
  1913. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1914. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1915. u8 *buf = nandc->data_buffer;
  1916. u8 ret = 0x0;
  1917. if (host->last_command == NAND_CMD_STATUS) {
  1918. ret = host->status;
  1919. host->status = NAND_STATUS_READY | NAND_STATUS_WP;
  1920. return ret;
  1921. }
  1922. if (nandc->buf_start < nandc->buf_count)
  1923. ret = buf[nandc->buf_start++];
  1924. return ret;
  1925. }
  1926. static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
  1927. {
  1928. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1929. int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
  1930. memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
  1931. nandc->buf_start += real_len;
  1932. }
  1933. static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
  1934. int len)
  1935. {
  1936. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1937. int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
  1938. memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
  1939. nandc->buf_start += real_len;
  1940. }
  1941. /* we support only one external chip for now */
  1942. static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
  1943. {
  1944. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1945. if (chipnr <= 0)
  1946. return;
  1947. dev_warn(nandc->dev, "invalid chip select\n");
  1948. }
  1949. /*
  1950. * NAND controller page layout info
  1951. *
  1952. * Layout with ECC enabled:
  1953. *
  1954. * |----------------------| |---------------------------------|
  1955. * | xx.......yy| | *********xx.......yy|
  1956. * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
  1957. * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
  1958. * | xx.......yy| | *********xx.......yy|
  1959. * |----------------------| |---------------------------------|
  1960. * codeword 1,2..n-1 codeword n
  1961. * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
  1962. *
  1963. * n = Number of codewords in the page
  1964. * . = ECC bytes
  1965. * * = Spare/free bytes
  1966. * x = Unused byte(s)
  1967. * y = Reserved byte(s)
  1968. *
  1969. * 2K page: n = 4, spare = 16 bytes
  1970. * 4K page: n = 8, spare = 32 bytes
  1971. * 8K page: n = 16, spare = 64 bytes
  1972. *
 * the qcom nand controller operates at a sub page/codeword level. each
 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
 * the number of ECC bytes varies based on the ECC strength and the bus width.
 *
 * the first n - 1 codewords contain 516 bytes of user data, the remaining
 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
 * both user data and spare (oobavail) bytes that sum up to 516 bytes.
  1980. *
  1981. * When we access a page with ECC enabled, the reserved bytes(s) are not
  1982. * accessible at all. When reading, we fill up these unreadable positions
  1983. * with 0xffs. When writing, the controller skips writing the inaccessible
  1984. * bytes.
  1985. *
  1986. * Layout with ECC disabled:
  1987. *
  1988. * |------------------------------| |---------------------------------------|
  1989. * | yy xx.......| | bb *********xx.......|
  1990. * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
  1991. * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
  1992. * | yy xx.......| | bb *********xx.......|
  1993. * |------------------------------| |---------------------------------------|
  1994. * codeword 1,2..n-1 codeword n
  1995. * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
  1996. *
  1997. * n = Number of codewords in the page
  1998. * . = ECC bytes
  1999. * * = Spare/free bytes
  2000. * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
  2002. * b = Real Bad Block byte(s)
  2003. * size1/size2 = function of codeword size and 'n'
  2004. *
  2005. * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
  2006. * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
  2007. * Block Markers. In the last codeword, this position contains the real BBM
  2008. *
  2009. * In order to have a consistent layout between RAW and ECC modes, we assume
  2010. * the following OOB layout arrangement:
  2011. *
  2012. * |-----------| |--------------------|
  2013. * |yyxx.......| |bb*********xx.......|
  2014. * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
  2015. * |yyxx.......| |bb*********xx.......|
  2016. * |yyxx.......| |bb*********xx.......|
  2017. * |-----------| |--------------------|
  2018. * first n - 1 nth OOB region
  2019. * OOB regions
  2020. *
  2021. * n = Number of codewords in the page
  2022. * . = ECC bytes
  2023. * * = FREE OOB bytes
  2024. * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
  2025. * x = Unused byte(s)
  2026. * b = Real bad block byte(s) (inaccessible when ECC enabled)
  2027. *
  2028. * This layout is read as is when ECC is disabled. When ECC is enabled, the
  2029. * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
  2030. * and assumed as 0xffs when we read a page/oob. The ECC, unused and
 * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
  2032. * the sum of the three).
  2033. */
  2034. static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
  2035. struct mtd_oob_region *oobregion)
  2036. {
  2037. struct nand_chip *chip = mtd_to_nand(mtd);
  2038. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  2039. struct nand_ecc_ctrl *ecc = &chip->ecc;
  2040. if (section > 1)
  2041. return -ERANGE;
  2042. if (!section) {
  2043. oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
  2044. host->bbm_size;
  2045. oobregion->offset = 0;
  2046. } else {
  2047. oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
  2048. oobregion->offset = mtd->oobsize - oobregion->length;
  2049. }
  2050. return 0;
  2051. }
  2052. static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
  2053. struct mtd_oob_region *oobregion)
  2054. {
  2055. struct nand_chip *chip = mtd_to_nand(mtd);
  2056. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  2057. struct nand_ecc_ctrl *ecc = &chip->ecc;
  2058. if (section)
  2059. return -ERANGE;
  2060. oobregion->length = ecc->steps * 4;
  2061. oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
  2062. return 0;
  2063. }
  2064. static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
  2065. .ecc = qcom_nand_ooblayout_ecc,
  2066. .free = qcom_nand_ooblayout_free,
  2067. };
  2068. static int
  2069. qcom_nandc_calc_ecc_bytes(int step_size, int strength)
  2070. {
  2071. return strength == 4 ? 12 : 16;
  2072. }
  2073. NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
  2074. NANDC_STEP_SIZE, 4, 8);
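/*
 * called once per chip after identification: pick the ECC strength from the
 * available OOB, derive the codeword geometry and precompute the cfg0/cfg1
 * and ECC configuration register values used for page accesses
 */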
  2075. static int qcom_nand_attach_chip(struct nand_chip *chip)
  2076. {
  2077. struct mtd_info *mtd = nand_to_mtd(chip);
  2078. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  2079. struct nand_ecc_ctrl *ecc = &chip->ecc;
  2080. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  2081. int cwperpage, bad_block_byte, ret;
  2082. bool wide_bus;
  2083. int ecc_mode = 1;
  2084. /* controller only supports 512 bytes data steps */
  2085. ecc->size = NANDC_STEP_SIZE;
  2086. wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
  2087. cwperpage = mtd->writesize / NANDC_STEP_SIZE;
/*
 * Each CW has 4 available OOB bytes which will be protected with ECC,
 * so the remaining OOB bytes can be used for ECC parity.
 */
  2092. ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
  2093. mtd->oobsize - (cwperpage * 4));
  2094. if (ret) {
  2095. dev_err(nandc->dev, "No valid ECC settings possible\n");
  2096. return ret;
  2097. }
  2098. if (ecc->strength >= 8) {
  2099. /* 8 bit ECC defaults to BCH ECC on all platforms */
  2100. host->bch_enabled = true;
  2101. ecc_mode = 1;
  2102. if (wide_bus) {
  2103. host->ecc_bytes_hw = 14;
  2104. host->spare_bytes = 0;
  2105. host->bbm_size = 2;
  2106. } else {
  2107. host->ecc_bytes_hw = 13;
  2108. host->spare_bytes = 2;
  2109. host->bbm_size = 1;
  2110. }
  2111. } else {
/*
 * if the controller supports BCH for 4 bit ECC, the controller
 * uses fewer bytes for ECC. If RS is used, the ECC data is
 * always 10 bytes
 */
  2117. if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
  2118. /* BCH */
  2119. host->bch_enabled = true;
  2120. ecc_mode = 0;
  2121. if (wide_bus) {
  2122. host->ecc_bytes_hw = 8;
  2123. host->spare_bytes = 2;
  2124. host->bbm_size = 2;
  2125. } else {
  2126. host->ecc_bytes_hw = 7;
  2127. host->spare_bytes = 4;
  2128. host->bbm_size = 1;
  2129. }
  2130. } else {
  2131. /* RS */
  2132. host->ecc_bytes_hw = 10;
  2133. if (wide_bus) {
  2134. host->spare_bytes = 0;
  2135. host->bbm_size = 2;
  2136. } else {
  2137. host->spare_bytes = 1;
  2138. host->bbm_size = 1;
  2139. }
  2140. }
  2141. }
/*
 * we consider ecc->bytes as the sum of all the non-data content in a
 * step. It gives us a clean representation of the oob area (even if
 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
 * ECC and 12 bytes for 4 bit ECC
 */
  2148. ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
  2149. ecc->read_page = qcom_nandc_read_page;
  2150. ecc->read_page_raw = qcom_nandc_read_page_raw;
  2151. ecc->read_oob = qcom_nandc_read_oob;
  2152. ecc->write_page = qcom_nandc_write_page;
  2153. ecc->write_page_raw = qcom_nandc_write_page_raw;
  2154. ecc->write_oob = qcom_nandc_write_oob;
  2155. ecc->mode = NAND_ECC_HW;
  2156. mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
  2157. nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
  2158. cwperpage);
  2159. /*
  2160. * DATA_UD_BYTES varies based on whether the read/write command protects
  2161. * spare data with ECC too. We protect spare data by default, so we set
  2162. * it to main + spare data, which are 512 and 4 bytes respectively.
  2163. */
  2164. host->cw_data = 516;
  2165. /*
  2166. * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
  2167. * for 8 bit ECC
  2168. */
  2169. host->cw_size = host->cw_data + ecc->bytes;
  2170. bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
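/*
 * for example, assuming a 2K page with 4-bit ECC (cwperpage = 4,
 * cw_size = 516 + 12 = 528): bad_block_byte = 2048 - 528 * 3 + 1 = 465
 */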
  2171. host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
  2172. | host->cw_data << UD_SIZE_BYTES
  2173. | 0 << DISABLE_STATUS_AFTER_WRITE
  2174. | 5 << NUM_ADDR_CYCLES
  2175. | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
  2176. | 0 << STATUS_BFR_READ
  2177. | 1 << SET_RD_MODE_AFTER_STATUS
  2178. | host->spare_bytes << SPARE_SIZE_BYTES;
  2179. host->cfg1 = 7 << NAND_RECOVERY_CYCLES
  2180. | 0 << CS_ACTIVE_BSY
  2181. | bad_block_byte << BAD_BLOCK_BYTE_NUM
  2182. | 0 << BAD_BLOCK_IN_SPARE_AREA
  2183. | 2 << WR_RD_BSY_GAP
  2184. | wide_bus << WIDE_FLASH
  2185. | host->bch_enabled << ENABLE_BCH_ECC;
  2186. host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
  2187. | host->cw_size << UD_SIZE_BYTES
  2188. | 5 << NUM_ADDR_CYCLES
  2189. | 0 << SPARE_SIZE_BYTES;
  2190. host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
  2191. | 0 << CS_ACTIVE_BSY
  2192. | 17 << BAD_BLOCK_BYTE_NUM
  2193. | 1 << BAD_BLOCK_IN_SPARE_AREA
  2194. | 2 << WR_RD_BSY_GAP
  2195. | wide_bus << WIDE_FLASH
  2196. | 1 << DEV0_CFG1_ECC_DISABLE;
  2197. host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
  2198. | 0 << ECC_SW_RESET
  2199. | host->cw_data << ECC_NUM_DATA_BYTES
  2200. | 1 << ECC_FORCE_CLK_OPEN
  2201. | ecc_mode << ECC_MODE
  2202. | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
  2203. host->ecc_buf_cfg = 0x203 << NUM_STEPS;
  2204. host->clrflashstatus = FS_READY_BSY_N;
  2205. host->clrreadstatus = 0xc0;
  2206. nandc->regs->erased_cw_detect_cfg_clr =
  2207. cpu_to_le32(CLR_ERASED_PAGE_DET);
  2208. nandc->regs->erased_cw_detect_cfg_set =
  2209. cpu_to_le32(SET_ERASED_PAGE_DET);
  2210. dev_dbg(nandc->dev,
  2211. "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
  2212. host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
  2213. host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
  2214. cwperpage);
  2215. return 0;
  2216. }
  2217. static const struct nand_controller_ops qcom_nandc_ops = {
  2218. .attach_chip = qcom_nand_attach_chip,
  2219. };
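/*
 * allocate the resources needed by the controller: the scratch data buffer,
 * the register read buffer and its DMA mapping, and the ADM or BAM DMA
 * channels
 */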
  2220. static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
  2221. {
  2222. int ret;
  2223. ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
  2224. if (ret) {
  2225. dev_err(nandc->dev, "failed to set DMA mask\n");
  2226. return ret;
  2227. }
/*
 * we use the internal buffer for reading ONFI params, reading small
 * data like ID and status, and performing read-copy-write operations
 * when writing to a codeword partially. 532 is the maximum possible
 * size of a codeword for our nand controller
 */
  2234. nandc->buf_size = 532;
  2235. nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
  2236. GFP_KERNEL);
  2237. if (!nandc->data_buffer)
  2238. return -ENOMEM;
  2239. nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
  2240. GFP_KERNEL);
  2241. if (!nandc->regs)
  2242. return -ENOMEM;
  2243. nandc->reg_read_buf = devm_kcalloc(nandc->dev,
  2244. MAX_REG_RD, sizeof(*nandc->reg_read_buf),
  2245. GFP_KERNEL);
  2246. if (!nandc->reg_read_buf)
  2247. return -ENOMEM;
  2248. if (nandc->props->is_bam) {
  2249. nandc->reg_read_dma =
  2250. dma_map_single(nandc->dev, nandc->reg_read_buf,
  2251. MAX_REG_RD *
  2252. sizeof(*nandc->reg_read_buf),
  2253. DMA_FROM_DEVICE);
  2254. if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
  2255. dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
  2256. return -EIO;
  2257. }
  2258. nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
  2259. if (!nandc->tx_chan) {
  2260. dev_err(nandc->dev, "failed to request tx channel\n");
  2261. return -ENODEV;
  2262. }
  2263. nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
  2264. if (!nandc->rx_chan) {
  2265. dev_err(nandc->dev, "failed to request rx channel\n");
  2266. return -ENODEV;
  2267. }
  2268. nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
  2269. if (!nandc->cmd_chan) {
  2270. dev_err(nandc->dev, "failed to request cmd channel\n");
  2271. return -ENODEV;
  2272. }
/*
 * Initially allocate BAM transaction to read ONFI param page.
 * After detecting all the devices, this BAM transaction will
 * be freed and the next BAM transaction will be allocated with
 * maximum codeword size
 */
  2279. nandc->max_cwperpage = 1;
  2280. nandc->bam_txn = alloc_bam_transaction(nandc);
  2281. if (!nandc->bam_txn) {
  2282. dev_err(nandc->dev,
  2283. "failed to allocate bam transaction\n");
  2284. return -ENOMEM;
  2285. }
  2286. } else {
  2287. nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
  2288. if (!nandc->chan) {
  2289. dev_err(nandc->dev,
  2290. "failed to request slave channel\n");
  2291. return -ENODEV;
  2292. }
  2293. }
  2294. INIT_LIST_HEAD(&nandc->desc_list);
  2295. INIT_LIST_HEAD(&nandc->host_list);
  2296. nand_controller_init(&nandc->controller);
  2297. nandc->controller.ops = &qcom_nandc_ops;
  2298. return 0;
  2299. }
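/* undo qcom_nandc_alloc(): unmap the register read buffer and release the DMA channels */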
  2300. static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
  2301. {
  2302. if (nandc->props->is_bam) {
  2303. if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
  2304. dma_unmap_single(nandc->dev, nandc->reg_read_dma,
  2305. MAX_REG_RD *
  2306. sizeof(*nandc->reg_read_buf),
  2307. DMA_FROM_DEVICE);
  2308. if (nandc->tx_chan)
  2309. dma_release_channel(nandc->tx_chan);
  2310. if (nandc->rx_chan)
  2311. dma_release_channel(nandc->rx_chan);
  2312. if (nandc->cmd_chan)
  2313. dma_release_channel(nandc->cmd_chan);
  2314. } else {
  2315. if (nandc->chan)
  2316. dma_release_channel(nandc->chan);
  2317. }
  2318. }
  2319. /* one time setup of a few nand controller registers */
  2320. static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
  2321. {
  2322. u32 nand_ctrl;
  2323. /* kill onenand */
  2324. nandc_write(nandc, SFLASHC_BURST_CFG, 0);
  2325. nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
  2326. NAND_DEV_CMD_VLD_VAL);
  2327. /* enable ADM or BAM DMA */
  2328. if (nandc->props->is_bam) {
  2329. nand_ctrl = nandc_read(nandc, NAND_CTRL);
  2330. nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
  2331. } else {
  2332. nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
  2333. }
  2334. /* save the original values of these registers */
  2335. nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
  2336. nandc->vld = NAND_DEV_CMD_VLD_VAL;
  2337. return 0;
  2338. }
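/*
 * initialise one NAND chip (host) described by the devicetree node @dn, hook
 * up the legacy and controller ops, then scan and register it with MTD
 */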
  2339. static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
  2340. struct qcom_nand_host *host,
  2341. struct device_node *dn)
  2342. {
  2343. struct nand_chip *chip = &host->chip;
  2344. struct mtd_info *mtd = nand_to_mtd(chip);
  2345. struct device *dev = nandc->dev;
  2346. int ret;
  2347. ret = of_property_read_u32(dn, "reg", &host->cs);
  2348. if (ret) {
  2349. dev_err(dev, "can't get chip-select\n");
  2350. return -ENXIO;
  2351. }
  2352. nand_set_flash_node(chip, dn);
  2353. mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
  2354. if (!mtd->name)
  2355. return -ENOMEM;
  2356. mtd->owner = THIS_MODULE;
  2357. mtd->dev.parent = dev;
  2358. chip->legacy.cmdfunc = qcom_nandc_command;
  2359. chip->select_chip = qcom_nandc_select_chip;
  2360. chip->legacy.read_byte = qcom_nandc_read_byte;
  2361. chip->legacy.read_buf = qcom_nandc_read_buf;
  2362. chip->legacy.write_buf = qcom_nandc_write_buf;
  2363. chip->legacy.set_features = nand_get_set_features_notsupp;
  2364. chip->legacy.get_features = nand_get_set_features_notsupp;
  2365. /*
  2366. * the bad block marker is readable only when we read the last codeword
  2367. * of a page with ECC disabled. currently, the nand_base and nand_bbt
  2368. * helpers don't allow us to read BB from a nand chip with ECC
  2369. * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
  2370. * and block_markbad helpers until we permanently switch to using
  2371. * MTD_OPS_RAW for all drivers (with the help of badblockbits)
  2372. */
  2373. chip->legacy.block_bad = qcom_nandc_block_bad;
  2374. chip->legacy.block_markbad = qcom_nandc_block_markbad;
  2375. chip->controller = &nandc->controller;
  2376. chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
  2377. NAND_SKIP_BBTSCAN;
  2378. /* set up initial status value */
  2379. host->status = NAND_STATUS_READY | NAND_STATUS_WP;
  2380. ret = nand_scan(chip, 1);
  2381. if (ret)
  2382. return ret;
  2383. ret = mtd_device_register(mtd, NULL, 0);
  2384. if (ret)
  2385. nand_cleanup(chip);
  2386. return ret;
  2387. }
  2388. static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
  2389. {
  2390. struct device *dev = nandc->dev;
  2391. struct device_node *dn = dev->of_node, *child;
  2392. struct qcom_nand_host *host;
  2393. int ret;
  2394. if (nandc->props->is_bam) {
  2395. free_bam_transaction(nandc);
  2396. nandc->bam_txn = alloc_bam_transaction(nandc);
  2397. if (!nandc->bam_txn) {
  2398. dev_err(nandc->dev,
  2399. "failed to allocate bam transaction\n");
  2400. return -ENOMEM;
  2401. }
  2402. }
  2403. for_each_available_child_of_node(dn, child) {
  2404. host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
  2405. if (!host) {
  2406. of_node_put(child);
  2407. return -ENOMEM;
  2408. }
  2409. ret = qcom_nand_host_init_and_register(nandc, host, child);
  2410. if (ret) {
  2411. devm_kfree(dev, host);
  2412. continue;
  2413. }
  2414. list_add_tail(&host->node, &nandc->host_list);
  2415. }
  2416. if (list_empty(&nandc->host_list))
  2417. return -ENODEV;
  2418. return 0;
  2419. }
  2420. /* parse custom DT properties here */
  2421. static int qcom_nandc_parse_dt(struct platform_device *pdev)
  2422. {
  2423. struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
  2424. struct device_node *np = nandc->dev->of_node;
  2425. int ret;
  2426. if (!nandc->props->is_bam) {
  2427. ret = of_property_read_u32(np, "qcom,cmd-crci",
  2428. &nandc->cmd_crci);
  2429. if (ret) {
  2430. dev_err(nandc->dev, "command CRCI unspecified\n");
  2431. return ret;
  2432. }
  2433. ret = of_property_read_u32(np, "qcom,data-crci",
  2434. &nandc->data_crci);
  2435. if (ret) {
  2436. dev_err(nandc->dev, "data CRCI unspecified\n");
  2437. return ret;
  2438. }
  2439. }
  2440. return 0;
  2441. }
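/*
 * probe: map the controller registers, acquire clocks and DMA resources,
 * perform one-time controller setup and then probe the chips described in DT
 */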
  2442. static int qcom_nandc_probe(struct platform_device *pdev)
  2443. {
  2444. struct qcom_nand_controller *nandc;
  2445. const void *dev_data;
  2446. struct device *dev = &pdev->dev;
  2447. struct resource *res;
  2448. int ret;
  2449. nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
  2450. if (!nandc)
  2451. return -ENOMEM;
  2452. platform_set_drvdata(pdev, nandc);
  2453. nandc->dev = dev;
  2454. dev_data = of_device_get_match_data(dev);
  2455. if (!dev_data) {
  2456. dev_err(&pdev->dev, "failed to get device data\n");
  2457. return -ENODEV;
  2458. }
  2459. nandc->props = dev_data;
  2460. nandc->core_clk = devm_clk_get(dev, "core");
  2461. if (IS_ERR(nandc->core_clk))
  2462. return PTR_ERR(nandc->core_clk);
  2463. nandc->aon_clk = devm_clk_get(dev, "aon");
  2464. if (IS_ERR(nandc->aon_clk))
  2465. return PTR_ERR(nandc->aon_clk);
  2466. ret = qcom_nandc_parse_dt(pdev);
  2467. if (ret)
  2468. return ret;
  2469. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2470. nandc->base = devm_ioremap_resource(dev, res);
  2471. if (IS_ERR(nandc->base))
  2472. return PTR_ERR(nandc->base);
  2473. nandc->base_phys = res->start;
  2474. nandc->base_dma = dma_map_resource(dev, res->start,
  2475. resource_size(res),
  2476. DMA_BIDIRECTIONAL, 0);
  2477. if (!nandc->base_dma)
  2478. return -ENXIO;
  2479. ret = qcom_nandc_alloc(nandc);
  2480. if (ret)
  2481. goto err_nandc_alloc;
  2482. ret = clk_prepare_enable(nandc->core_clk);
  2483. if (ret)
  2484. goto err_core_clk;
  2485. ret = clk_prepare_enable(nandc->aon_clk);
  2486. if (ret)
  2487. goto err_aon_clk;
  2488. ret = qcom_nandc_setup(nandc);
  2489. if (ret)
  2490. goto err_setup;
  2491. ret = qcom_probe_nand_devices(nandc);
  2492. if (ret)
  2493. goto err_setup;
  2494. return 0;
  2495. err_setup:
  2496. clk_disable_unprepare(nandc->aon_clk);
  2497. err_aon_clk:
  2498. clk_disable_unprepare(nandc->core_clk);
  2499. err_core_clk:
  2500. qcom_nandc_unalloc(nandc);
  2501. err_nandc_alloc:
  2502. dma_unmap_resource(dev, res->start, resource_size(res),
  2503. DMA_BIDIRECTIONAL, 0);
  2504. return ret;
  2505. }
  2506. static int qcom_nandc_remove(struct platform_device *pdev)
  2507. {
  2508. struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
  2509. struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2510. struct qcom_nand_host *host;
  2511. list_for_each_entry(host, &nandc->host_list, node)
  2512. nand_release(&host->chip);
  2513. qcom_nandc_unalloc(nandc);
  2514. clk_disable_unprepare(nandc->aon_clk);
  2515. clk_disable_unprepare(nandc->core_clk);
  2516. dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
  2517. DMA_BIDIRECTIONAL, 0);
  2518. return 0;
  2519. }
  2520. static const struct qcom_nandc_props ipq806x_nandc_props = {
  2521. .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
  2522. .is_bam = false,
  2523. .dev_cmd_reg_start = 0x0,
  2524. };
  2525. static const struct qcom_nandc_props ipq4019_nandc_props = {
  2526. .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
  2527. .is_bam = true,
  2528. .dev_cmd_reg_start = 0x0,
  2529. };
  2530. static const struct qcom_nandc_props ipq8074_nandc_props = {
  2531. .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
  2532. .is_bam = true,
  2533. .dev_cmd_reg_start = 0x7000,
  2534. };
  2535. /*
  2536. * data will hold a struct pointer containing more differences once we support
  2537. * more controller variants
  2538. */
  2539. static const struct of_device_id qcom_nandc_of_match[] = {
  2540. {
  2541. .compatible = "qcom,ipq806x-nand",
  2542. .data = &ipq806x_nandc_props,
  2543. },
  2544. {
  2545. .compatible = "qcom,ipq4019-nand",
  2546. .data = &ipq4019_nandc_props,
  2547. },
  2548. {
  2549. .compatible = "qcom,ipq8074-nand",
  2550. .data = &ipq8074_nandc_props,
  2551. },
  2552. {}
  2553. };
  2554. MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
  2555. static struct platform_driver qcom_nandc_driver = {
  2556. .driver = {
  2557. .name = "qcom-nandc",
  2558. .of_match_table = qcom_nandc_of_match,
  2559. },
  2560. .probe = qcom_nandc_probe,
  2561. .remove = qcom_nandc_remove,
  2562. };
  2563. module_platform_driver(qcom_nandc_driver);
  2564. MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
  2565. MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
  2566. MODULE_LICENSE("GPL v2");