nand_base.c

/*
 * Overview:
 *   This is the generic MTD driver for NAND flash devices. It should be
 *   capable of working with almost all NAND chips currently available.
 *
 * Additional technical information is available on
 * http://www.linux-mtd.infradead.org/doc/nand.html
 *
 *	Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 *  Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 *  TODO:
 *	Enable cached programming for 2k page size chips
 *	Check, if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	BBT table is not serialized, has to be fixed
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
static int nand_get_device(struct mtd_info *mtd, int new_state);

static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops);

/* Define default oob placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		if (mtd->oobsize == 16)
			oobregion->length = 4;
		else
			oobregion->length = 3;
	} else {
		if (mtd->oobsize == 8)
			return -ERANGE;

		oobregion->offset = 6;
		oobregion->length = ecc->total - 4;
	}

	return 0;
}

static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (mtd->oobsize == 16) {
		if (section)
			return -ERANGE;

		oobregion->length = 8;
		oobregion->offset = 8;
	} else {
		oobregion->length = 2;
		if (!section)
			oobregion->offset = 3;
		else
			oobregion->offset = 6;
	}

	return 0;
}

const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
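/*
 * For reference, the small-page layout implied by the two helpers above: on a
 * 16-byte OOB the first ECC region is bytes 0-3 and the free area bytes 8-15;
 * on an 8-byte OOB the ECC bytes sit at 0-2 and the free bytes at 3-4 and 6-7.
 */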
static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section || !ecc->total)
		return -ERANGE;

	oobregion->length = ecc->total;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = mtd->oobsize - ecc->total - 2;
	oobregion->offset = 2;

	return 0;
}

const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
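/*
 * In the default large-page layout above, the ECC bytes occupy the last
 * ecc->total bytes of the OOB area, and everything from byte 2 up to the
 * start of the ECC area is reported as free (bytes 0-1 are left for the bad
 * block marker).
 */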
/*
 * Support the old "large page" layout used for 1-bit Hamming ECC where the
 * ECC bytes are placed at a fixed offset.
 */
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		oobregion->offset = 40;
		break;
	case 128:
		oobregion->offset = 80;
		break;
	default:
		return -EINVAL;
	}

	oobregion->length = ecc->total;
	if (oobregion->offset + oobregion->length > mtd->oobsize)
		return -ERANGE;

	return 0;
}

static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ecc_offset = 0;

	if (section < 0 || section > 1)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		ecc_offset = 40;
		break;
	case 128:
		ecc_offset = 80;
		break;
	default:
		return -EINVAL;
	}

	if (section == 0) {
		oobregion->offset = 2;
		oobregion->length = ecc_offset - 2;
	} else {
		oobregion->offset = ecc_offset + ecc->total;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
static int check_offs_len(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	/* Start address must align on block boundary */
	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must align on block boundary */
	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}

/**
 * nand_release_device - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Release the controller and the chip */
	spin_lock(&chip->controller->lock);
	chip->controller->active = NULL;
	chip->state = FL_READY;
	wake_up(&chip->controller->wq);
	spin_unlock(&chip->controller->lock);
}

/**
 * nand_read_byte - [DEFAULT] read one byte from the chip
 * @chip: NAND chip object
 *
 * Default read function for 8bit buswidth
 */
static uint8_t nand_read_byte(struct nand_chip *chip)
{
	return readb(chip->legacy.IO_ADDR_R);
}

/**
 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
 * @chip: NAND chip object
 *
 * Default read function for 16bit buswidth with endianness conversion.
 *
 */
static uint8_t nand_read_byte16(struct nand_chip *chip)
{
	return (uint8_t) cpu_to_le16(readw(chip->legacy.IO_ADDR_R));
}

/**
 * nand_select_chip - [DEFAULT] control CE line
 * @chip: NAND chip object
 * @chipnr: chipnumber to select, -1 for deselect
 *
 * Default select function for 1 chip devices.
 */
static void nand_select_chip(struct nand_chip *chip, int chipnr)
{
	switch (chipnr) {
	case -1:
		chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
				      0 | NAND_CTRL_CHANGE);
		break;
	case 0:
		break;

	default:
		BUG();
	}
}
/**
 * nand_write_byte - [DEFAULT] write single byte to chip
 * @chip: NAND chip object
 * @byte: value to write
 *
 * Default function to write a byte to I/O[7:0]
 */
static void nand_write_byte(struct nand_chip *chip, uint8_t byte)
{
	chip->legacy.write_buf(chip, &byte, 1);
}

/**
 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
 * @chip: NAND chip object
 * @byte: value to write
 *
 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
 */
static void nand_write_byte16(struct nand_chip *chip, uint8_t byte)
{
	uint16_t word = byte;

	/*
	 * It's not entirely clear what should happen to I/O[15:8] when writing
	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
	 *
	 *	When the host supports a 16-bit bus width, only data is
	 *	transferred at the 16-bit width. All address and command line
	 *	transfers shall use only the lower 8-bits of the data bus. During
	 *	command transfers, the host may place any value on the upper
	 *	8-bits of the data bus. During address transfers, the host shall
	 *	set the upper 8-bits of the data bus to 00h.
	 *
	 * One user of the write_byte callback is nand_set_features. The
	 * four parameters are specified to be written to I/O[7:0], but this is
	 * neither an address nor a command transfer. Let's assume a 0 on the
	 * upper I/O lines is OK.
	 */
	chip->legacy.write_buf(chip, (uint8_t *)&word, 2);
}

/**
 * nand_write_buf - [DEFAULT] write buffer to chip
 * @chip: NAND chip object
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Default write function for 8bit buswidth.
 */
static void nand_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	iowrite8_rep(chip->legacy.IO_ADDR_W, buf, len);
}

/**
 * nand_read_buf - [DEFAULT] read chip data into buffer
 * @chip: NAND chip object
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Default read function for 8bit buswidth.
 */
static void nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
}

/**
 * nand_write_buf16 - [DEFAULT] write buffer to chip
 * @chip: NAND chip object
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Default write function for 16bit buswidth.
 */
static void nand_write_buf16(struct nand_chip *chip, const uint8_t *buf,
			     int len)
{
	u16 *p = (u16 *) buf;

	iowrite16_rep(chip->legacy.IO_ADDR_W, p, len >> 1);
}

/**
 * nand_read_buf16 - [DEFAULT] read chip data into buffer
 * @chip: NAND chip object
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Default read function for 16bit buswidth.
 */
static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
{
	u16 *p = (u16 *) buf;

	ioread16_rep(chip->legacy.IO_ADDR_R, p, len >> 1);
}
/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check if the block is bad.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int page, page_end, res;
	u8 bad;

	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);

	for (; page < page_end; page++) {
		res = chip->ecc.read_oob(chip, page);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];
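		/*
		 * With the default badblockbits of 8 any marker value other
		 * than 0xFF means the block is bad; with a smaller threshold
		 * the block is only treated as bad when fewer than
		 * badblockbits bits are still set in the marker byte.
		 */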
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;
	}

	return 0;
}

/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
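	/*
	 * On 16-bit wide chips the marker is written as a full word: round the
	 * OOB offset down to an even byte and write two zero bytes.
	 */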
	if (chip->options & NAND_BUSWIDTH_16) {
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	do {
		res = nand_do_write_oob(mtd, ofs, &ops);
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}

/**
 * nand_markbad_bbm - mark a block by updating the BBM
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 */
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->legacy.block_markbad)
		return chip->legacy.block_markbad(chip, ofs);

	return nand_default_block_markbad(chip, ofs);
}

static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}

/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(mtd, FL_WRITING);
		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(mtd);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @mtd: MTD device structure
 *
 * Check if the device is write protected. The function expects that the
 * device is already selected.
 */
static int nand_check_wp(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status & NAND_STATUS_WP ? 0 : 1;
}

/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->bbt)
		return 0;
	/* Return info from the table */
	return nand_isreserved_bbt(chip, ofs);
}

/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 * @allowbbt: 1 if it is allowed to access the BBT area
 *
 * Check if the block is bad, either by reading the bad block table or by
 * calling the scan function.
 */
static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Return info from the table */
	if (chip->bbt)
		return nand_isbad_bbt(chip, ofs, allowbbt);

	return nand_isbad_bbm(chip, ofs);
}

/**
 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
 * @mtd: MTD device structure
 * @timeo: Timeout
 *
 * Helper function for nand_wait_ready used when needing to wait in interrupt
 * context.
 */
static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int i;

	/* Wait for the device to get ready */
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready(chip))
			break;
		touch_softlockup_watchdog();
		mdelay(1);
	}
}

/**
 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
 * @chip: NAND chip object
 *
 * Wait for the ready pin after a command, and warn if a timeout occurs.
 */
void nand_wait_ready(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned long timeo = 400;

	if (in_interrupt() || oops_in_progress)
		return panic_nand_wait_ready(mtd, timeo);

	/* Wait until command is processed or timeout occurs */
	timeo = jiffies + msecs_to_jiffies(timeo);
	do {
		if (chip->legacy.dev_ready(chip))
			return;
		cond_resched();
	} while (time_before(jiffies, timeo));

	if (!chip->legacy.dev_ready(chip))
		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
 * @mtd: MTD device structure
 * @timeo: Timeout in ms
 *
 * Wait for status ready (i.e. command done) or timeout.
 */
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	timeo = jiffies + msecs_to_jiffies(timeo);
	do {
		u8 status;

		ret = nand_read_data_op(chip, &status, sizeof(status), true);
		if (ret)
			return;

		if (status & NAND_STATUS_READY)
			break;
		touch_softlockup_watchdog();
	} while (time_before(jiffies, timeo));
};

/**
 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 * If that does not happen within the specified timeout, -ETIMEDOUT is
 * returned.
 *
 * This helper is intended to be used when the controller does not have access
 * to the NAND R/B pin.
 *
 * Be aware that calling this helper from an ->exec_op() implementation means
 * ->exec_op() must be re-entrant.
 *
 * Return 0 if the NAND chip is ready, a negative error otherwise.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_sdr_timings *timings;
	u8 status = 0;
	int ret;

	if (!chip->exec_op)
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg. */
	timings = nand_get_sdr_timings(&chip->data_interface);
	ndelay(PSEC_TO_NSEC(timings->tWB_max));

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		ret = nand_read_data_op(chip, &status, sizeof(status), true);
		if (ret)
			break;

		if (status & NAND_STATUS_READY)
			break;

		/*
		 * Typical lowest execution time for a tR on most NANDs is 10us,
		 * use this as polling delay before doing something smarter (ie.
		 * deriving a delay from the timeout value, timeout_ms/ratio).
		 */
		udelay(10);
	} while (time_before(jiffies, timeout_ms));

	/*
	 * We have to exit READ_STATUS mode in order to read real data on the
	 * bus in case the WAITRDY instruction is preceding a DATA_IN
	 * instruction.
	 */
	nand_exit_status_op(chip);
	if (ret)
		return ret;

	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
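/*
 * Typical use (sketch): a controller driver without access to the R/B line
 * calls nand_soft_waitrdy() from its ->exec_op() handling of a WAITRDY
 * instruction, e.g. nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms).
 */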
/**
 * nand_command - [DEFAULT] Send command to NAND device
 * @chip: NAND chip object
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This function is used for small page devices
 * (512 Bytes per page).
 */
static void nand_command(struct nand_chip *chip, unsigned int command,
			 int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;

	/* Write out the command to the device */
	if (command == NAND_CMD_SEQIN) {
		int readcmd;

		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		chip->legacy.cmd_ctrl(chip, readcmd, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (command != NAND_CMD_NONE)
		chip->legacy.cmd_ctrl(chip, command, ctrl);

	/* Address cycle, when necessary */
	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
	/* Serially input address */
	if (column != -1) {
		/* Adjust columns for 16 bit buswidth */
		if (chip->options & NAND_BUSWIDTH_16 &&
				!nand_opcode_8bits(command))
			column >>= 1;
		chip->legacy.cmd_ctrl(chip, column, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (page_addr != -1) {
		chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
		chip->legacy.cmd_ctrl(chip, page_addr >> 8, ctrl);
		if (chip->options & NAND_ROW_ADDR_3)
			chip->legacy.cmd_ctrl(chip, page_addr >> 16, ctrl);
	}
	chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
			      NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers; status and
	 * sequential in need no delay.
	 */
	switch (command) {

	case NAND_CMD_NONE:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RESET:
		if (chip->legacy.dev_ready)
			break;
		udelay(chip->legacy.chip_delay);
		chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
				      NAND_CTRL_CLE | NAND_CTRL_CHANGE);
		chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
				      NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

		/* This applies to read commands */
	case NAND_CMD_READ0:
		/*
		 * READ0 is sometimes used to exit GET STATUS mode. When this
		 * is the case no address cycles are requested, and we can use
		 * this information to detect that we should not wait for the
		 * device to be ready.
		 */
		if (column == -1 && page_addr == -1)
			return;

	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay
		 */
		if (!chip->legacy.dev_ready) {
			udelay(chip->legacy.chip_delay);
			return;
		}
	}
	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(chip);
}

static void nand_ccs_delay(struct nand_chip *chip)
{
	/*
	 * The controller already takes care of waiting for tCCS when the RNDIN
	 * or RNDOUT command is sent, return directly.
	 */
	if (!(chip->options & NAND_WAIT_TCCS))
		return;

	/*
	 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
	 * (which should be safe for all NANDs).
	 */
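	/* tCCS_min is expressed in ps, hence the division by 1000 for ndelay(). */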
	if (chip->setup_data_interface)
		ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
	else
		ndelay(500);
}

/**
 * nand_command_lp - [DEFAULT] Send command to NAND large page device
 * @chip: NAND chip object
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This is the version for the new large page
 * devices. We don't have the separate regions as we have in the small page
 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
 */
static void nand_command_lp(struct nand_chip *chip, unsigned int command,
			    int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Emulate NAND_CMD_READOOB */
	if (command == NAND_CMD_READOOB) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	}

	/* Command latch cycle */
	if (command != NAND_CMD_NONE)
		chip->legacy.cmd_ctrl(chip, command,
				      NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);

	if (column != -1 || page_addr != -1) {
		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;

		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (chip->options & NAND_BUSWIDTH_16 &&
					!nand_opcode_8bits(command))
				column >>= 1;
			chip->legacy.cmd_ctrl(chip, column, ctrl);
			ctrl &= ~NAND_CTRL_CHANGE;

			/* Only output a single addr cycle for 8bits opcodes. */
			if (!nand_opcode_8bits(command))
				chip->legacy.cmd_ctrl(chip, column >> 8, ctrl);
		}
		if (page_addr != -1) {
			chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
			chip->legacy.cmd_ctrl(chip, page_addr >> 8,
					      NAND_NCE | NAND_ALE);
			if (chip->options & NAND_ROW_ADDR_3)
				chip->legacy.cmd_ctrl(chip, page_addr >> 16,
						      NAND_NCE | NAND_ALE);
		}
	}
	chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
			      NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers; status and
	 * sequential in need no delay.
	 */
	switch (command) {

	case NAND_CMD_NONE:
	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RNDIN:
		nand_ccs_delay(chip);
		return;

	case NAND_CMD_RESET:
		if (chip->legacy.dev_ready)
			break;
		udelay(chip->legacy.chip_delay);
		chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
				      NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
				      NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

	case NAND_CMD_RNDOUT:
		/* No ready / busy check necessary */
		chip->legacy.cmd_ctrl(chip, NAND_CMD_RNDOUTSTART,
				      NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
				      NAND_NCE | NAND_CTRL_CHANGE);

		nand_ccs_delay(chip);
		return;

	case NAND_CMD_READ0:
		/*
		 * READ0 is sometimes used to exit GET STATUS mode. When this
		 * is the case no address cycles are requested, and we can use
		 * this information to detect that READSTART should not be
		 * issued.
		 */
		if (column == -1 && page_addr == -1)
			return;

		chip->legacy.cmd_ctrl(chip, NAND_CMD_READSTART,
				      NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
				      NAND_NCE | NAND_CTRL_CHANGE);

		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay.
		 */
		if (!chip->legacy.dev_ready) {
			udelay(chip->legacy.chip_delay);
			return;
		}
	}

	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(chip);
}
  887. /**
  888. * panic_nand_get_device - [GENERIC] Get chip for selected access
  889. * @chip: the nand chip descriptor
  890. * @mtd: MTD device structure
  891. * @new_state: the state which is requested
  892. *
  893. * Used when in panic, no locks are taken.
  894. */
  895. static void panic_nand_get_device(struct nand_chip *chip,
  896. struct mtd_info *mtd, int new_state)
  897. {
  898. /* Hardware controller shared among independent devices */
  899. chip->controller->active = chip;
  900. chip->state = new_state;
  901. }
  902. /**
  903. * nand_get_device - [GENERIC] Get chip for selected access
  904. * @mtd: MTD device structure
  905. * @new_state: the state which is requested
  906. *
  907. * Get the device and lock it for exclusive access
  908. */
  909. static int
  910. nand_get_device(struct mtd_info *mtd, int new_state)
  911. {
  912. struct nand_chip *chip = mtd_to_nand(mtd);
  913. spinlock_t *lock = &chip->controller->lock;
  914. wait_queue_head_t *wq = &chip->controller->wq;
  915. DECLARE_WAITQUEUE(wait, current);
  916. retry:
  917. spin_lock(lock);
  918. /* Hardware controller shared among independent devices */
  919. if (!chip->controller->active)
  920. chip->controller->active = chip;
  921. if (chip->controller->active == chip && chip->state == FL_READY) {
  922. chip->state = new_state;
  923. spin_unlock(lock);
  924. return 0;
  925. }
  926. if (new_state == FL_PM_SUSPENDED) {
  927. if (chip->controller->active->state == FL_PM_SUSPENDED) {
  928. chip->state = FL_PM_SUSPENDED;
  929. spin_unlock(lock);
  930. return 0;
  931. }
  932. }
  933. set_current_state(TASK_UNINTERRUPTIBLE);
  934. add_wait_queue(wq, &wait);
  935. spin_unlock(lock);
  936. schedule();
  937. remove_wait_queue(wq, &wait);
  938. goto retry;
  939. }
  940. /**
  941. * panic_nand_wait - [GENERIC] wait until the command is done
942. * @chip: NAND chip structure
943. * @timeo: timeout in milliseconds
  945. *
  946. * Wait for command done. This is a helper function for nand_wait used when
  947. * we are in interrupt context. May happen when in panic and trying to write
  948. * an oops through mtdoops.
  949. */
  950. static void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
  951. {
  952. int i;
  953. for (i = 0; i < timeo; i++) {
  954. if (chip->legacy.dev_ready) {
  955. if (chip->legacy.dev_ready(chip))
  956. break;
  957. } else {
  958. int ret;
  959. u8 status;
  960. ret = nand_read_data_op(chip, &status, sizeof(status),
  961. true);
  962. if (ret)
  963. return;
  964. if (status & NAND_STATUS_READY)
  965. break;
  966. }
  967. mdelay(1);
  968. }
  969. }
  970. /**
  971. * nand_wait - [DEFAULT] wait until the command is done
972. * @chip: NAND chip structure
  974. *
  975. * Wait for command done. This applies to erase and program only.
  976. */
  977. static int nand_wait(struct nand_chip *chip)
  978. {
  979. unsigned long timeo = 400;
  980. u8 status;
  981. int ret;
  982. /*
  983. * Apply this short delay always to ensure that we do wait tWB in any
  984. * case on any machine.
  985. */
  986. ndelay(100);
  987. ret = nand_status_op(chip, NULL);
  988. if (ret)
  989. return ret;
  990. if (in_interrupt() || oops_in_progress)
  991. panic_nand_wait(chip, timeo);
  992. else {
  993. timeo = jiffies + msecs_to_jiffies(timeo);
  994. do {
  995. if (chip->legacy.dev_ready) {
  996. if (chip->legacy.dev_ready(chip))
  997. break;
  998. } else {
  999. ret = nand_read_data_op(chip, &status,
  1000. sizeof(status), true);
  1001. if (ret)
  1002. return ret;
  1003. if (status & NAND_STATUS_READY)
  1004. break;
  1005. }
  1006. cond_resched();
  1007. } while (time_before(jiffies, timeo));
  1008. }
  1009. ret = nand_read_data_op(chip, &status, sizeof(status), true);
  1010. if (ret)
  1011. return ret;
1012. /* This can happen in case of timeout or buggy dev_ready */
  1013. WARN_ON(!(status & NAND_STATUS_READY));
  1014. return status;
  1015. }
  1016. static bool nand_supports_get_features(struct nand_chip *chip, int addr)
  1017. {
  1018. return (chip->parameters.supports_set_get_features &&
  1019. test_bit(addr, chip->parameters.get_feature_list));
  1020. }
  1021. static bool nand_supports_set_features(struct nand_chip *chip, int addr)
  1022. {
  1023. return (chip->parameters.supports_set_get_features &&
  1024. test_bit(addr, chip->parameters.set_feature_list));
  1025. }
  1026. /**
  1027. * nand_reset_data_interface - Reset data interface and timings
  1028. * @chip: The NAND chip
  1029. * @chipnr: Internal die id
  1030. *
  1031. * Reset the Data interface and timings to ONFI mode 0.
  1032. *
  1033. * Returns 0 for success or negative error code otherwise.
  1034. */
  1035. static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
  1036. {
  1037. int ret;
  1038. if (!chip->setup_data_interface)
  1039. return 0;
  1040. /*
  1041. * The ONFI specification says:
  1042. * "
  1043. * To transition from NV-DDR or NV-DDR2 to the SDR data
  1044. * interface, the host shall use the Reset (FFh) command
  1045. * using SDR timing mode 0. A device in any timing mode is
  1046. * required to recognize Reset (FFh) command issued in SDR
  1047. * timing mode 0.
  1048. * "
  1049. *
  1050. * Configure the data interface in SDR mode and set the
  1051. * timings to timing mode 0.
  1052. */
  1053. onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
  1054. ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
  1055. if (ret)
  1056. pr_err("Failed to configure data interface to SDR timing mode 0\n");
  1057. return ret;
  1058. }
  1059. /**
  1060. * nand_setup_data_interface - Setup the best data interface and timings
  1061. * @chip: The NAND chip
  1062. * @chipnr: Internal die id
  1063. *
  1064. * Find and configure the best data interface and NAND timings supported by
  1065. * the chip and the driver.
  1066. * First tries to retrieve supported timing modes from ONFI information,
  1067. * and if the NAND chip does not support ONFI, relies on the
  1068. * ->onfi_timing_mode_default specified in the nand_ids table.
  1069. *
  1070. * Returns 0 for success or negative error code otherwise.
  1071. */
  1072. static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
  1073. {
  1074. u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
  1075. chip->onfi_timing_mode_default,
  1076. };
  1077. int ret;
  1078. if (!chip->setup_data_interface)
  1079. return 0;
  1080. /* Change the mode on the chip side (if supported by the NAND chip) */
  1081. if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
  1082. chip->select_chip(chip, chipnr);
  1083. ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
  1084. tmode_param);
  1085. chip->select_chip(chip, -1);
  1086. if (ret)
  1087. return ret;
  1088. }
  1089. /* Change the mode on the controller side */
  1090. ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
  1091. if (ret)
  1092. return ret;
  1093. /* Check the mode has been accepted by the chip, if supported */
  1094. if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
  1095. return 0;
  1096. memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
  1097. chip->select_chip(chip, chipnr);
  1098. ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
  1099. tmode_param);
  1100. chip->select_chip(chip, -1);
  1101. if (ret)
  1102. goto err_reset_chip;
  1103. if (tmode_param[0] != chip->onfi_timing_mode_default) {
  1104. pr_warn("timing mode %d not acknowledged by the NAND chip\n",
  1105. chip->onfi_timing_mode_default);
  1106. goto err_reset_chip;
  1107. }
  1108. return 0;
  1109. err_reset_chip:
  1110. /*
  1111. * Fallback to mode 0 if the chip explicitly did not ack the chosen
  1112. * timing mode.
  1113. */
  1114. nand_reset_data_interface(chip, chipnr);
  1115. chip->select_chip(chip, chipnr);
  1116. nand_reset_op(chip);
  1117. chip->select_chip(chip, -1);
  1118. return ret;
  1119. }
  1120. /**
  1121. * nand_init_data_interface - find the best data interface and timings
  1122. * @chip: The NAND chip
  1123. *
  1124. * Find the best data interface and NAND timings supported by the chip
  1125. * and the driver.
  1126. * First tries to retrieve supported timing modes from ONFI information,
  1127. * and if the NAND chip does not support ONFI, relies on the
  1128. * ->onfi_timing_mode_default specified in the nand_ids table. After this
  1129. * function nand_chip->data_interface is initialized with the best timing mode
  1130. * available.
  1131. *
  1132. * Returns 0 for success or negative error code otherwise.
  1133. */
  1134. static int nand_init_data_interface(struct nand_chip *chip)
  1135. {
  1136. int modes, mode, ret;
  1137. if (!chip->setup_data_interface)
  1138. return 0;
  1139. /*
  1140. * First try to identify the best timings from ONFI parameters and
  1141. * if the NAND does not support ONFI, fallback to the default ONFI
  1142. * timing mode.
  1143. */
  1144. if (chip->parameters.onfi) {
  1145. modes = chip->parameters.onfi->async_timing_mode;
  1146. } else {
  1147. if (!chip->onfi_timing_mode_default)
  1148. return 0;
  1149. modes = GENMASK(chip->onfi_timing_mode_default, 0);
  1150. }
  1151. for (mode = fls(modes) - 1; mode >= 0; mode--) {
  1152. ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
  1153. if (ret)
  1154. continue;
  1155. /*
  1156. * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
  1157. * controller supports the requested timings.
  1158. */
  1159. ret = chip->setup_data_interface(chip,
  1160. NAND_DATA_IFACE_CHECK_ONLY,
  1161. &chip->data_interface);
  1162. if (!ret) {
  1163. chip->onfi_timing_mode_default = mode;
  1164. break;
  1165. }
  1166. }
  1167. return 0;
  1168. }
  1169. /**
  1170. * nand_fill_column_cycles - fill the column cycles of an address
  1171. * @chip: The NAND chip
  1172. * @addrs: Array of address cycles to fill
  1173. * @offset_in_page: The offset in the page
  1174. *
  1175. * Fills the first or the first two bytes of the @addrs field depending
  1176. * on the NAND bus width and the page size.
  1177. *
  1178. * Returns the number of cycles needed to encode the column, or a negative
  1179. * error code in case one of the arguments is invalid.
  1180. */
  1181. static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
  1182. unsigned int offset_in_page)
  1183. {
  1184. struct mtd_info *mtd = nand_to_mtd(chip);
1185. /* Make sure the offset does not exceed the full page size (data + OOB). */
  1186. if (offset_in_page > mtd->writesize + mtd->oobsize)
  1187. return -EINVAL;
  1188. /*
  1189. * On small page NANDs, there's a dedicated command to access the OOB
  1190. * area, and the column address is relative to the start of the OOB
1191. * area, not the start of the page. Adjust the address accordingly.
  1192. */
  1193. if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
  1194. offset_in_page -= mtd->writesize;
  1195. /*
  1196. * The offset in page is expressed in bytes, if the NAND bus is 16-bit
  1197. * wide, then it must be divided by 2.
  1198. */
  1199. if (chip->options & NAND_BUSWIDTH_16) {
  1200. if (WARN_ON(offset_in_page % 2))
  1201. return -EINVAL;
  1202. offset_in_page /= 2;
  1203. }
  1204. addrs[0] = offset_in_page;
  1205. /*
  1206. * Small page NANDs use 1 cycle for the columns, while large page NANDs
  1207. * need 2
  1208. */
  1209. if (mtd->writesize <= 512)
  1210. return 1;
  1211. addrs[1] = offset_in_page >> 8;
  1212. return 2;
  1213. }
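/*
 * Worked example (illustrative, not part of the original code): on a large
 * page NAND with a 2048-byte page and an 8-bit bus, asking for
 * offset_in_page = 2048 (the start of the OOB area) fills addrs[0] = 0x00
 * and addrs[1] = 0x08 and returns 2 column cycles. On a 512-byte small page
 * device the same offset is first reduced by the page size, so addrs[0] = 0x00
 * and a single cycle is returned.
 */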
  1214. static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
  1215. unsigned int offset_in_page, void *buf,
  1216. unsigned int len)
  1217. {
  1218. struct mtd_info *mtd = nand_to_mtd(chip);
  1219. const struct nand_sdr_timings *sdr =
  1220. nand_get_sdr_timings(&chip->data_interface);
  1221. u8 addrs[4];
  1222. struct nand_op_instr instrs[] = {
  1223. NAND_OP_CMD(NAND_CMD_READ0, 0),
  1224. NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
  1225. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
  1226. PSEC_TO_NSEC(sdr->tRR_min)),
  1227. NAND_OP_DATA_IN(len, buf, 0),
  1228. };
  1229. struct nand_operation op = NAND_OPERATION(instrs);
  1230. int ret;
  1231. /* Drop the DATA_IN instruction if len is set to 0. */
  1232. if (!len)
  1233. op.ninstrs--;
  1234. if (offset_in_page >= mtd->writesize)
  1235. instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
  1236. else if (offset_in_page >= 256 &&
  1237. !(chip->options & NAND_BUSWIDTH_16))
  1238. instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
  1239. ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
  1240. if (ret < 0)
  1241. return ret;
  1242. addrs[1] = page;
  1243. addrs[2] = page >> 8;
  1244. if (chip->options & NAND_ROW_ADDR_3) {
  1245. addrs[3] = page >> 16;
  1246. instrs[1].ctx.addr.naddrs++;
  1247. }
  1248. return nand_exec_op(chip, &op);
  1249. }
  1250. static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
  1251. unsigned int offset_in_page, void *buf,
  1252. unsigned int len)
  1253. {
  1254. const struct nand_sdr_timings *sdr =
  1255. nand_get_sdr_timings(&chip->data_interface);
  1256. u8 addrs[5];
  1257. struct nand_op_instr instrs[] = {
  1258. NAND_OP_CMD(NAND_CMD_READ0, 0),
  1259. NAND_OP_ADDR(4, addrs, 0),
  1260. NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
  1261. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
  1262. PSEC_TO_NSEC(sdr->tRR_min)),
  1263. NAND_OP_DATA_IN(len, buf, 0),
  1264. };
  1265. struct nand_operation op = NAND_OPERATION(instrs);
  1266. int ret;
  1267. /* Drop the DATA_IN instruction if len is set to 0. */
  1268. if (!len)
  1269. op.ninstrs--;
  1270. ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
  1271. if (ret < 0)
  1272. return ret;
  1273. addrs[2] = page;
  1274. addrs[3] = page >> 8;
  1275. if (chip->options & NAND_ROW_ADDR_3) {
  1276. addrs[4] = page >> 16;
  1277. instrs[1].ctx.addr.naddrs++;
  1278. }
  1279. return nand_exec_op(chip, &op);
  1280. }
  1281. /**
  1282. * nand_read_page_op - Do a READ PAGE operation
  1283. * @chip: The NAND chip
  1284. * @page: page to read
  1285. * @offset_in_page: offset within the page
  1286. * @buf: buffer used to store the data
  1287. * @len: length of the buffer
  1288. *
  1289. * This function issues a READ PAGE operation.
  1290. * This function does not select/unselect the CS line.
  1291. *
  1292. * Returns 0 on success, a negative error code otherwise.
  1293. */
  1294. int nand_read_page_op(struct nand_chip *chip, unsigned int page,
  1295. unsigned int offset_in_page, void *buf, unsigned int len)
  1296. {
  1297. struct mtd_info *mtd = nand_to_mtd(chip);
  1298. if (len && !buf)
  1299. return -EINVAL;
  1300. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1301. return -EINVAL;
  1302. if (chip->exec_op) {
  1303. if (mtd->writesize > 512)
  1304. return nand_lp_exec_read_page_op(chip, page,
  1305. offset_in_page, buf,
  1306. len);
  1307. return nand_sp_exec_read_page_op(chip, page, offset_in_page,
  1308. buf, len);
  1309. }
  1310. chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
  1311. if (len)
  1312. chip->legacy.read_buf(chip, buf, len);
  1313. return 0;
  1314. }
  1315. EXPORT_SYMBOL_GPL(nand_read_page_op);
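/*
 * Usage sketch (illustrative only, not taken from a real driver): reading a
 * full raw page into a caller-provided buffer. The 'page', 'databuf' and
 * 'mtd' (from nand_to_mtd(chip)) names are assumptions made for the example;
 * as documented above, the caller is responsible for the CS line.
 *
 *	chip->select_chip(chip, 0);
 *	ret = nand_read_page_op(chip, page, 0, databuf, mtd->writesize);
 *	chip->select_chip(chip, -1);
 *	if (ret)
 *		return ret;
 */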
  1316. /**
  1317. * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
  1318. * @chip: The NAND chip
  1319. * @page: parameter page to read
  1320. * @buf: buffer used to store the data
  1321. * @len: length of the buffer
  1322. *
  1323. * This function issues a READ PARAMETER PAGE operation.
  1324. * This function does not select/unselect the CS line.
  1325. *
  1326. * Returns 0 on success, a negative error code otherwise.
  1327. */
  1328. static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
  1329. unsigned int len)
  1330. {
  1331. unsigned int i;
  1332. u8 *p = buf;
  1333. if (len && !buf)
  1334. return -EINVAL;
  1335. if (chip->exec_op) {
  1336. const struct nand_sdr_timings *sdr =
  1337. nand_get_sdr_timings(&chip->data_interface);
  1338. struct nand_op_instr instrs[] = {
  1339. NAND_OP_CMD(NAND_CMD_PARAM, 0),
  1340. NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
  1341. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
  1342. PSEC_TO_NSEC(sdr->tRR_min)),
  1343. NAND_OP_8BIT_DATA_IN(len, buf, 0),
  1344. };
  1345. struct nand_operation op = NAND_OPERATION(instrs);
  1346. /* Drop the DATA_IN instruction if len is set to 0. */
  1347. if (!len)
  1348. op.ninstrs--;
  1349. return nand_exec_op(chip, &op);
  1350. }
  1351. chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
  1352. for (i = 0; i < len; i++)
  1353. p[i] = chip->legacy.read_byte(chip);
  1354. return 0;
  1355. }
  1356. /**
  1357. * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
  1358. * @chip: The NAND chip
  1359. * @offset_in_page: offset within the page
  1360. * @buf: buffer used to store the data
  1361. * @len: length of the buffer
  1362. * @force_8bit: force 8-bit bus access
  1363. *
  1364. * This function issues a CHANGE READ COLUMN operation.
  1365. * This function does not select/unselect the CS line.
  1366. *
  1367. * Returns 0 on success, a negative error code otherwise.
  1368. */
  1369. int nand_change_read_column_op(struct nand_chip *chip,
  1370. unsigned int offset_in_page, void *buf,
  1371. unsigned int len, bool force_8bit)
  1372. {
  1373. struct mtd_info *mtd = nand_to_mtd(chip);
  1374. if (len && !buf)
  1375. return -EINVAL;
  1376. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1377. return -EINVAL;
  1378. /* Small page NANDs do not support column change. */
  1379. if (mtd->writesize <= 512)
  1380. return -ENOTSUPP;
  1381. if (chip->exec_op) {
  1382. const struct nand_sdr_timings *sdr =
  1383. nand_get_sdr_timings(&chip->data_interface);
  1384. u8 addrs[2] = {};
  1385. struct nand_op_instr instrs[] = {
  1386. NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
  1387. NAND_OP_ADDR(2, addrs, 0),
  1388. NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
  1389. PSEC_TO_NSEC(sdr->tCCS_min)),
  1390. NAND_OP_DATA_IN(len, buf, 0),
  1391. };
  1392. struct nand_operation op = NAND_OPERATION(instrs);
  1393. int ret;
  1394. ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
  1395. if (ret < 0)
  1396. return ret;
  1397. /* Drop the DATA_IN instruction if len is set to 0. */
  1398. if (!len)
  1399. op.ninstrs--;
  1400. instrs[3].ctx.data.force_8bit = force_8bit;
  1401. return nand_exec_op(chip, &op);
  1402. }
  1403. chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
  1404. if (len)
  1405. chip->legacy.read_buf(chip, buf, len);
  1406. return 0;
  1407. }
  1408. EXPORT_SYMBOL_GPL(nand_change_read_column_op);
  1409. /**
  1410. * nand_read_oob_op - Do a READ OOB operation
  1411. * @chip: The NAND chip
  1412. * @page: page to read
  1413. * @offset_in_oob: offset within the OOB area
  1414. * @buf: buffer used to store the data
  1415. * @len: length of the buffer
  1416. *
  1417. * This function issues a READ OOB operation.
  1418. * This function does not select/unselect the CS line.
  1419. *
  1420. * Returns 0 on success, a negative error code otherwise.
  1421. */
  1422. int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
  1423. unsigned int offset_in_oob, void *buf, unsigned int len)
  1424. {
  1425. struct mtd_info *mtd = nand_to_mtd(chip);
  1426. if (len && !buf)
  1427. return -EINVAL;
  1428. if (offset_in_oob + len > mtd->oobsize)
  1429. return -EINVAL;
  1430. if (chip->exec_op)
  1431. return nand_read_page_op(chip, page,
  1432. mtd->writesize + offset_in_oob,
  1433. buf, len);
  1434. chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
  1435. if (len)
  1436. chip->legacy.read_buf(chip, buf, len);
  1437. return 0;
  1438. }
  1439. EXPORT_SYMBOL_GPL(nand_read_oob_op);
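/*
 * Usage sketch (illustrative only): reading the whole OOB area of a page.
 * 'oobbuf' and 'page' are assumptions made for the example.
 *
 *	ret = nand_read_oob_op(chip, page, 0, oobbuf, mtd->oobsize);
 */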
  1440. static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
  1441. unsigned int offset_in_page, const void *buf,
  1442. unsigned int len, bool prog)
  1443. {
  1444. struct mtd_info *mtd = nand_to_mtd(chip);
  1445. const struct nand_sdr_timings *sdr =
  1446. nand_get_sdr_timings(&chip->data_interface);
  1447. u8 addrs[5] = {};
  1448. struct nand_op_instr instrs[] = {
  1449. /*
  1450. * The first instruction will be dropped if we're dealing
  1451. * with a large page NAND and adjusted if we're dealing
  1452. * with a small page NAND and the page offset is > 255.
  1453. */
  1454. NAND_OP_CMD(NAND_CMD_READ0, 0),
  1455. NAND_OP_CMD(NAND_CMD_SEQIN, 0),
  1456. NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
  1457. NAND_OP_DATA_OUT(len, buf, 0),
  1458. NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
  1459. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
  1460. };
  1461. struct nand_operation op = NAND_OPERATION(instrs);
  1462. int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
  1463. int ret;
  1464. u8 status;
  1465. if (naddrs < 0)
  1466. return naddrs;
  1467. addrs[naddrs++] = page;
  1468. addrs[naddrs++] = page >> 8;
  1469. if (chip->options & NAND_ROW_ADDR_3)
  1470. addrs[naddrs++] = page >> 16;
  1471. instrs[2].ctx.addr.naddrs = naddrs;
  1472. /* Drop the last two instructions if we're not programming the page. */
  1473. if (!prog) {
  1474. op.ninstrs -= 2;
  1475. /* Also drop the DATA_OUT instruction if empty. */
  1476. if (!len)
  1477. op.ninstrs--;
  1478. }
  1479. if (mtd->writesize <= 512) {
  1480. /*
  1481. * Small pages need some more tweaking: we have to adjust the
  1482. * first instruction depending on the page offset we're trying
  1483. * to access.
  1484. */
  1485. if (offset_in_page >= mtd->writesize)
  1486. instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
  1487. else if (offset_in_page >= 256 &&
  1488. !(chip->options & NAND_BUSWIDTH_16))
  1489. instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
  1490. } else {
  1491. /*
  1492. * Drop the first command if we're dealing with a large page
  1493. * NAND.
  1494. */
  1495. op.instrs++;
  1496. op.ninstrs--;
  1497. }
  1498. ret = nand_exec_op(chip, &op);
  1499. if (!prog || ret)
  1500. return ret;
  1501. ret = nand_status_op(chip, &status);
  1502. if (ret)
  1503. return ret;
  1504. return status;
  1505. }
  1506. /**
  1507. * nand_prog_page_begin_op - starts a PROG PAGE operation
  1508. * @chip: The NAND chip
  1509. * @page: page to write
  1510. * @offset_in_page: offset within the page
  1511. * @buf: buffer containing the data to write to the page
  1512. * @len: length of the buffer
  1513. *
  1514. * This function issues the first half of a PROG PAGE operation.
  1515. * This function does not select/unselect the CS line.
  1516. *
  1517. * Returns 0 on success, a negative error code otherwise.
  1518. */
  1519. int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
  1520. unsigned int offset_in_page, const void *buf,
  1521. unsigned int len)
  1522. {
  1523. struct mtd_info *mtd = nand_to_mtd(chip);
  1524. if (len && !buf)
  1525. return -EINVAL;
  1526. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1527. return -EINVAL;
  1528. if (chip->exec_op)
  1529. return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
  1530. len, false);
  1531. chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
  1532. if (buf)
  1533. chip->legacy.write_buf(chip, buf, len);
  1534. return 0;
  1535. }
  1536. EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
  1537. /**
  1538. * nand_prog_page_end_op - ends a PROG PAGE operation
  1539. * @chip: The NAND chip
  1540. *
  1541. * This function issues the second half of a PROG PAGE operation.
  1542. * This function does not select/unselect the CS line.
  1543. *
  1544. * Returns 0 on success, a negative error code otherwise.
  1545. */
  1546. int nand_prog_page_end_op(struct nand_chip *chip)
  1547. {
  1548. int ret;
  1549. u8 status;
  1550. if (chip->exec_op) {
  1551. const struct nand_sdr_timings *sdr =
  1552. nand_get_sdr_timings(&chip->data_interface);
  1553. struct nand_op_instr instrs[] = {
  1554. NAND_OP_CMD(NAND_CMD_PAGEPROG,
  1555. PSEC_TO_NSEC(sdr->tWB_max)),
  1556. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
  1557. };
  1558. struct nand_operation op = NAND_OPERATION(instrs);
  1559. ret = nand_exec_op(chip, &op);
  1560. if (ret)
  1561. return ret;
  1562. ret = nand_status_op(chip, &status);
  1563. if (ret)
  1564. return ret;
  1565. } else {
  1566. chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
  1567. ret = chip->legacy.waitfunc(chip);
  1568. if (ret < 0)
  1569. return ret;
  1570. status = ret;
  1571. }
  1572. if (status & NAND_STATUS_FAIL)
  1573. return -EIO;
  1574. return 0;
  1575. }
  1576. EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
  1577. /**
  1578. * nand_prog_page_op - Do a full PROG PAGE operation
  1579. * @chip: The NAND chip
  1580. * @page: page to write
  1581. * @offset_in_page: offset within the page
  1582. * @buf: buffer containing the data to write to the page
  1583. * @len: length of the buffer
  1584. *
  1585. * This function issues a full PROG PAGE operation.
  1586. * This function does not select/unselect the CS line.
  1587. *
  1588. * Returns 0 on success, a negative error code otherwise.
  1589. */
  1590. int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
  1591. unsigned int offset_in_page, const void *buf,
  1592. unsigned int len)
  1593. {
  1594. struct mtd_info *mtd = nand_to_mtd(chip);
  1595. int status;
  1596. if (!len || !buf)
  1597. return -EINVAL;
  1598. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1599. return -EINVAL;
  1600. if (chip->exec_op) {
  1601. status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
  1602. len, true);
  1603. } else {
  1604. chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
  1605. page);
  1606. chip->legacy.write_buf(chip, buf, len);
  1607. chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
  1608. status = chip->legacy.waitfunc(chip);
  1609. }
  1610. if (status & NAND_STATUS_FAIL)
  1611. return -EIO;
  1612. return 0;
  1613. }
  1614. EXPORT_SYMBOL_GPL(nand_prog_page_op);
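/*
 * Usage sketch (illustrative only): programming a full raw page. As with the
 * read path, the caller selects the die; 'chipnr', 'page' and 'databuf' are
 * assumptions made for the example.
 *
 *	chip->select_chip(chip, chipnr);
 *	ret = nand_prog_page_op(chip, page, 0, databuf, mtd->writesize);
 *	chip->select_chip(chip, -1);
 */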
  1615. /**
  1616. * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
  1617. * @chip: The NAND chip
  1618. * @offset_in_page: offset within the page
  1619. * @buf: buffer containing the data to send to the NAND
  1620. * @len: length of the buffer
  1621. * @force_8bit: force 8-bit bus access
  1622. *
  1623. * This function issues a CHANGE WRITE COLUMN operation.
  1624. * This function does not select/unselect the CS line.
  1625. *
  1626. * Returns 0 on success, a negative error code otherwise.
  1627. */
  1628. int nand_change_write_column_op(struct nand_chip *chip,
  1629. unsigned int offset_in_page,
  1630. const void *buf, unsigned int len,
  1631. bool force_8bit)
  1632. {
  1633. struct mtd_info *mtd = nand_to_mtd(chip);
  1634. if (len && !buf)
  1635. return -EINVAL;
  1636. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1637. return -EINVAL;
  1638. /* Small page NANDs do not support column change. */
  1639. if (mtd->writesize <= 512)
  1640. return -ENOTSUPP;
  1641. if (chip->exec_op) {
  1642. const struct nand_sdr_timings *sdr =
  1643. nand_get_sdr_timings(&chip->data_interface);
  1644. u8 addrs[2];
  1645. struct nand_op_instr instrs[] = {
  1646. NAND_OP_CMD(NAND_CMD_RNDIN, 0),
  1647. NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
  1648. NAND_OP_DATA_OUT(len, buf, 0),
  1649. };
  1650. struct nand_operation op = NAND_OPERATION(instrs);
  1651. int ret;
  1652. ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
  1653. if (ret < 0)
  1654. return ret;
  1655. instrs[2].ctx.data.force_8bit = force_8bit;
  1656. /* Drop the DATA_OUT instruction if len is set to 0. */
  1657. if (!len)
  1658. op.ninstrs--;
  1659. return nand_exec_op(chip, &op);
  1660. }
  1661. chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
  1662. if (len)
  1663. chip->legacy.write_buf(chip, buf, len);
  1664. return 0;
  1665. }
  1666. EXPORT_SYMBOL_GPL(nand_change_write_column_op);
  1667. /**
  1668. * nand_readid_op - Do a READID operation
  1669. * @chip: The NAND chip
  1670. * @addr: address cycle to pass after the READID command
  1671. * @buf: buffer used to store the ID
  1672. * @len: length of the buffer
  1673. *
  1674. * This function sends a READID command and reads back the ID returned by the
  1675. * NAND.
  1676. * This function does not select/unselect the CS line.
  1677. *
  1678. * Returns 0 on success, a negative error code otherwise.
  1679. */
  1680. int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
  1681. unsigned int len)
  1682. {
  1683. unsigned int i;
  1684. u8 *id = buf;
  1685. if (len && !buf)
  1686. return -EINVAL;
  1687. if (chip->exec_op) {
  1688. const struct nand_sdr_timings *sdr =
  1689. nand_get_sdr_timings(&chip->data_interface);
  1690. struct nand_op_instr instrs[] = {
  1691. NAND_OP_CMD(NAND_CMD_READID, 0),
  1692. NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
  1693. NAND_OP_8BIT_DATA_IN(len, buf, 0),
  1694. };
  1695. struct nand_operation op = NAND_OPERATION(instrs);
  1696. /* Drop the DATA_IN instruction if len is set to 0. */
  1697. if (!len)
  1698. op.ninstrs--;
  1699. return nand_exec_op(chip, &op);
  1700. }
  1701. chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
  1702. for (i = 0; i < len; i++)
  1703. id[i] = chip->legacy.read_byte(chip);
  1704. return 0;
  1705. }
  1706. EXPORT_SYMBOL_GPL(nand_readid_op);
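/*
 * Usage sketch (illustrative only): reading two ID bytes after a READID at
 * address 0x00.
 *
 *	u8 id[2];
 *	int ret;
 *
 *	ret = nand_readid_op(chip, 0, id, sizeof(id));
 *	if (!ret)
 *		pr_debug("manufacturer 0x%02x, device 0x%02x\n", id[0], id[1]);
 */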
  1707. /**
  1708. * nand_status_op - Do a STATUS operation
  1709. * @chip: The NAND chip
  1710. * @status: out variable to store the NAND status
  1711. *
  1712. * This function sends a STATUS command and reads back the status returned by
  1713. * the NAND.
  1714. * This function does not select/unselect the CS line.
  1715. *
  1716. * Returns 0 on success, a negative error code otherwise.
  1717. */
  1718. int nand_status_op(struct nand_chip *chip, u8 *status)
  1719. {
  1720. if (chip->exec_op) {
  1721. const struct nand_sdr_timings *sdr =
  1722. nand_get_sdr_timings(&chip->data_interface);
  1723. struct nand_op_instr instrs[] = {
  1724. NAND_OP_CMD(NAND_CMD_STATUS,
  1725. PSEC_TO_NSEC(sdr->tADL_min)),
  1726. NAND_OP_8BIT_DATA_IN(1, status, 0),
  1727. };
  1728. struct nand_operation op = NAND_OPERATION(instrs);
  1729. if (!status)
  1730. op.ninstrs--;
  1731. return nand_exec_op(chip, &op);
  1732. }
  1733. chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
  1734. if (status)
  1735. *status = chip->legacy.read_byte(chip);
  1736. return 0;
  1737. }
  1738. EXPORT_SYMBOL_GPL(nand_status_op);
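/*
 * Usage sketch (illustrative only): polling the status register and then
 * leaving status mode with nand_exit_status_op() (defined below) so that a
 * following data read is not short-circuited to the status byte.
 *
 *	u8 status;
 *	int ret;
 *
 *	ret = nand_status_op(chip, &status);
 *	if (!ret && (status & NAND_STATUS_FAIL))
 *		ret = -EIO;
 *	nand_exit_status_op(chip);
 */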
  1739. /**
  1740. * nand_exit_status_op - Exit a STATUS operation
  1741. * @chip: The NAND chip
  1742. *
  1743. * This function sends a READ0 command to cancel the effect of the STATUS
  1744. * command to avoid reading only the status until a new read command is sent.
  1745. *
  1746. * This function does not select/unselect the CS line.
  1747. *
  1748. * Returns 0 on success, a negative error code otherwise.
  1749. */
  1750. int nand_exit_status_op(struct nand_chip *chip)
  1751. {
  1752. if (chip->exec_op) {
  1753. struct nand_op_instr instrs[] = {
  1754. NAND_OP_CMD(NAND_CMD_READ0, 0),
  1755. };
  1756. struct nand_operation op = NAND_OPERATION(instrs);
  1757. return nand_exec_op(chip, &op);
  1758. }
  1759. chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
  1760. return 0;
  1761. }
  1762. EXPORT_SYMBOL_GPL(nand_exit_status_op);
  1763. /**
  1764. * nand_erase_op - Do an erase operation
  1765. * @chip: The NAND chip
  1766. * @eraseblock: block to erase
  1767. *
  1768. * This function sends an ERASE command and waits for the NAND to be ready
  1769. * before returning.
  1770. * This function does not select/unselect the CS line.
  1771. *
  1772. * Returns 0 on success, a negative error code otherwise.
  1773. */
  1774. int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
  1775. {
  1776. unsigned int page = eraseblock <<
  1777. (chip->phys_erase_shift - chip->page_shift);
  1778. int ret;
  1779. u8 status;
  1780. if (chip->exec_op) {
  1781. const struct nand_sdr_timings *sdr =
  1782. nand_get_sdr_timings(&chip->data_interface);
  1783. u8 addrs[3] = { page, page >> 8, page >> 16 };
  1784. struct nand_op_instr instrs[] = {
  1785. NAND_OP_CMD(NAND_CMD_ERASE1, 0),
  1786. NAND_OP_ADDR(2, addrs, 0),
  1787. NAND_OP_CMD(NAND_CMD_ERASE2,
1788. PSEC_TO_NSEC(sdr->tWB_max)),
  1789. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
  1790. };
  1791. struct nand_operation op = NAND_OPERATION(instrs);
  1792. if (chip->options & NAND_ROW_ADDR_3)
  1793. instrs[1].ctx.addr.naddrs++;
  1794. ret = nand_exec_op(chip, &op);
  1795. if (ret)
  1796. return ret;
  1797. ret = nand_status_op(chip, &status);
  1798. if (ret)
  1799. return ret;
  1800. } else {
  1801. chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
  1802. chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
  1803. ret = chip->legacy.waitfunc(chip);
  1804. if (ret < 0)
  1805. return ret;
  1806. status = ret;
  1807. }
  1808. if (status & NAND_STATUS_FAIL)
  1809. return -EIO;
  1810. return 0;
  1811. }
  1812. EXPORT_SYMBOL_GPL(nand_erase_op);
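/*
 * Usage sketch (illustrative only): erasing one block. The eraseblock index
 * is derived from a byte offset using the shift already stored in the chip
 * structure; 'offs' is an assumption made for the example.
 *
 *	ret = nand_erase_op(chip, offs >> chip->phys_erase_shift);
 *	if (ret)
 *		pr_debug("block at 0x%llx failed to erase\n", (u64)offs);
 */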
  1813. /**
  1814. * nand_set_features_op - Do a SET FEATURES operation
  1815. * @chip: The NAND chip
  1816. * @feature: feature id
  1817. * @data: 4 bytes of data
  1818. *
  1819. * This function sends a SET FEATURES command and waits for the NAND to be
  1820. * ready before returning.
  1821. * This function does not select/unselect the CS line.
  1822. *
  1823. * Returns 0 on success, a negative error code otherwise.
  1824. */
  1825. static int nand_set_features_op(struct nand_chip *chip, u8 feature,
  1826. const void *data)
  1827. {
  1828. const u8 *params = data;
  1829. int i, ret;
  1830. if (chip->exec_op) {
  1831. const struct nand_sdr_timings *sdr =
  1832. nand_get_sdr_timings(&chip->data_interface);
  1833. struct nand_op_instr instrs[] = {
  1834. NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
  1835. NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
  1836. NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
  1837. PSEC_TO_NSEC(sdr->tWB_max)),
  1838. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
  1839. };
  1840. struct nand_operation op = NAND_OPERATION(instrs);
  1841. return nand_exec_op(chip, &op);
  1842. }
  1843. chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
  1844. for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
  1845. chip->legacy.write_byte(chip, params[i]);
  1846. ret = chip->legacy.waitfunc(chip);
  1847. if (ret < 0)
  1848. return ret;
  1849. if (ret & NAND_STATUS_FAIL)
  1850. return -EIO;
  1851. return 0;
  1852. }
  1853. /**
  1854. * nand_get_features_op - Do a GET FEATURES operation
  1855. * @chip: The NAND chip
  1856. * @feature: feature id
  1857. * @data: 4 bytes of data
  1858. *
  1859. * This function sends a GET FEATURES command and waits for the NAND to be
  1860. * ready before returning.
  1861. * This function does not select/unselect the CS line.
  1862. *
  1863. * Returns 0 on success, a negative error code otherwise.
  1864. */
  1865. static int nand_get_features_op(struct nand_chip *chip, u8 feature,
  1866. void *data)
  1867. {
  1868. u8 *params = data;
  1869. int i;
  1870. if (chip->exec_op) {
  1871. const struct nand_sdr_timings *sdr =
  1872. nand_get_sdr_timings(&chip->data_interface);
  1873. struct nand_op_instr instrs[] = {
  1874. NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
  1875. NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
  1876. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
  1877. PSEC_TO_NSEC(sdr->tRR_min)),
  1878. NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
  1879. data, 0),
  1880. };
  1881. struct nand_operation op = NAND_OPERATION(instrs);
  1882. return nand_exec_op(chip, &op);
  1883. }
  1884. chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
  1885. for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
  1886. params[i] = chip->legacy.read_byte(chip);
  1887. return 0;
  1888. }
  1889. static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
  1890. unsigned int delay_ns)
  1891. {
  1892. if (chip->exec_op) {
  1893. struct nand_op_instr instrs[] = {
1894. /* timeout_ms and delay_ns are already in ms/ns, no unit conversion */
1895. NAND_OP_WAIT_RDY(timeout_ms, delay_ns),
  1896. };
  1897. struct nand_operation op = NAND_OPERATION(instrs);
  1898. return nand_exec_op(chip, &op);
  1899. }
  1900. /* Apply delay or wait for ready/busy pin */
  1901. if (!chip->legacy.dev_ready)
  1902. udelay(chip->legacy.chip_delay);
  1903. else
  1904. nand_wait_ready(chip);
  1905. return 0;
  1906. }
  1907. /**
  1908. * nand_reset_op - Do a reset operation
  1909. * @chip: The NAND chip
  1910. *
  1911. * This function sends a RESET command and waits for the NAND to be ready
  1912. * before returning.
  1913. * This function does not select/unselect the CS line.
  1914. *
  1915. * Returns 0 on success, a negative error code otherwise.
  1916. */
  1917. int nand_reset_op(struct nand_chip *chip)
  1918. {
  1919. if (chip->exec_op) {
  1920. const struct nand_sdr_timings *sdr =
  1921. nand_get_sdr_timings(&chip->data_interface);
  1922. struct nand_op_instr instrs[] = {
  1923. NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
  1924. NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
  1925. };
  1926. struct nand_operation op = NAND_OPERATION(instrs);
  1927. return nand_exec_op(chip, &op);
  1928. }
  1929. chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
  1930. return 0;
  1931. }
  1932. EXPORT_SYMBOL_GPL(nand_reset_op);
  1933. /**
  1934. * nand_read_data_op - Read data from the NAND
  1935. * @chip: The NAND chip
  1936. * @buf: buffer used to store the data
  1937. * @len: length of the buffer
  1938. * @force_8bit: force 8-bit bus access
  1939. *
  1940. * This function does a raw data read on the bus. Usually used after launching
  1941. * another NAND operation like nand_read_page_op().
  1942. * This function does not select/unselect the CS line.
  1943. *
  1944. * Returns 0 on success, a negative error code otherwise.
  1945. */
  1946. int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
  1947. bool force_8bit)
  1948. {
  1949. if (!len || !buf)
  1950. return -EINVAL;
  1951. if (chip->exec_op) {
  1952. struct nand_op_instr instrs[] = {
  1953. NAND_OP_DATA_IN(len, buf, 0),
  1954. };
  1955. struct nand_operation op = NAND_OPERATION(instrs);
  1956. instrs[0].ctx.data.force_8bit = force_8bit;
  1957. return nand_exec_op(chip, &op);
  1958. }
  1959. if (force_8bit) {
  1960. u8 *p = buf;
  1961. unsigned int i;
  1962. for (i = 0; i < len; i++)
  1963. p[i] = chip->legacy.read_byte(chip);
  1964. } else {
  1965. chip->legacy.read_buf(chip, buf, len);
  1966. }
  1967. return 0;
  1968. }
  1969. EXPORT_SYMBOL_GPL(nand_read_data_op);
  1970. /**
1971. * nand_write_data_op - Write data to the NAND
  1972. * @chip: The NAND chip
  1973. * @buf: buffer containing the data to send on the bus
  1974. * @len: length of the buffer
  1975. * @force_8bit: force 8-bit bus access
  1976. *
  1977. * This function does a raw data write on the bus. Usually used after launching
1978. * another NAND operation like nand_prog_page_begin_op().
  1979. * This function does not select/unselect the CS line.
  1980. *
  1981. * Returns 0 on success, a negative error code otherwise.
  1982. */
  1983. int nand_write_data_op(struct nand_chip *chip, const void *buf,
  1984. unsigned int len, bool force_8bit)
  1985. {
  1986. if (!len || !buf)
  1987. return -EINVAL;
  1988. if (chip->exec_op) {
  1989. struct nand_op_instr instrs[] = {
  1990. NAND_OP_DATA_OUT(len, buf, 0),
  1991. };
  1992. struct nand_operation op = NAND_OPERATION(instrs);
  1993. instrs[0].ctx.data.force_8bit = force_8bit;
  1994. return nand_exec_op(chip, &op);
  1995. }
  1996. if (force_8bit) {
  1997. const u8 *p = buf;
  1998. unsigned int i;
  1999. for (i = 0; i < len; i++)
  2000. chip->legacy.write_byte(chip, p[i]);
  2001. } else {
  2002. chip->legacy.write_buf(chip, buf, len);
  2003. }
  2004. return 0;
  2005. }
  2006. EXPORT_SYMBOL_GPL(nand_write_data_op);
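/*
 * Usage sketch (illustrative only): the raw data helpers are typically used
 * between nand_prog_page_begin_op() and nand_prog_page_end_op(), for example
 * to append ECC bytes in a software-ECC write path. 'buf', 'page', 'ecccalc'
 * and 'eccbytes' are assumptions made for the example.
 *
 *	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
 *	if (!ret)
 *		ret = nand_write_data_op(chip, ecccalc, eccbytes, false);
 *	if (!ret)
 *		ret = nand_prog_page_end_op(chip);
 */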
  2007. /**
  2008. * struct nand_op_parser_ctx - Context used by the parser
  2009. * @instrs: array of all the instructions that must be addressed
  2010. * @ninstrs: length of the @instrs array
  2011. * @subop: Sub-operation to be passed to the NAND controller
  2012. *
  2013. * This structure is used by the core to split NAND operations into
  2014. * sub-operations that can be handled by the NAND controller.
  2015. */
  2016. struct nand_op_parser_ctx {
  2017. const struct nand_op_instr *instrs;
  2018. unsigned int ninstrs;
  2019. struct nand_subop subop;
  2020. };
  2021. /**
  2022. * nand_op_parser_must_split_instr - Checks if an instruction must be split
  2023. * @pat: the parser pattern element that matches @instr
  2024. * @instr: pointer to the instruction to check
  2025. * @start_offset: this is an in/out parameter. If @instr has already been
  2026. * split, then @start_offset is the offset from which to start
  2027. * (either an address cycle or an offset in the data buffer).
2028. * Conversely, if the function returns true (i.e. @instr must be
  2029. * split), this parameter is updated to point to the first
  2030. * data/address cycle that has not been taken care of.
  2031. *
2032. * Some NAND controllers are limited and cannot send X address cycles in a
2033. * single operation, or cannot read/write more than Y bytes at the same time.
  2034. * In this case, split the instruction that does not fit in a single
  2035. * controller-operation into two or more chunks.
  2036. *
  2037. * Returns true if the instruction must be split, false otherwise.
  2038. * The @start_offset parameter is also updated to the offset at which the next
  2039. * bundle of instruction must start (if an address or a data instruction).
  2040. */
  2041. static bool
  2042. nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
  2043. const struct nand_op_instr *instr,
  2044. unsigned int *start_offset)
  2045. {
  2046. switch (pat->type) {
  2047. case NAND_OP_ADDR_INSTR:
  2048. if (!pat->ctx.addr.maxcycles)
  2049. break;
  2050. if (instr->ctx.addr.naddrs - *start_offset >
  2051. pat->ctx.addr.maxcycles) {
  2052. *start_offset += pat->ctx.addr.maxcycles;
  2053. return true;
  2054. }
  2055. break;
  2056. case NAND_OP_DATA_IN_INSTR:
  2057. case NAND_OP_DATA_OUT_INSTR:
  2058. if (!pat->ctx.data.maxlen)
  2059. break;
  2060. if (instr->ctx.data.len - *start_offset >
  2061. pat->ctx.data.maxlen) {
  2062. *start_offset += pat->ctx.data.maxlen;
  2063. return true;
  2064. }
  2065. break;
  2066. default:
  2067. break;
  2068. }
  2069. return false;
  2070. }
  2071. /**
  2072. * nand_op_parser_match_pat - Checks if a pattern matches the instructions
  2073. * remaining in the parser context
  2074. * @pat: the pattern to test
  2075. * @ctx: the parser context structure to match with the pattern @pat
  2076. *
  2077. * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
2078. * Returns true if this is the case, false otherwise. When true is returned,
  2079. * @ctx->subop is updated with the set of instructions to be passed to the
  2080. * controller driver.
  2081. */
  2082. static bool
  2083. nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
  2084. struct nand_op_parser_ctx *ctx)
  2085. {
  2086. unsigned int instr_offset = ctx->subop.first_instr_start_off;
  2087. const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
  2088. const struct nand_op_instr *instr = ctx->subop.instrs;
  2089. unsigned int i, ninstrs;
  2090. for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
  2091. /*
  2092. * The pattern instruction does not match the operation
  2093. * instruction. If the instruction is marked optional in the
  2094. * pattern definition, we skip the pattern element and continue
  2095. * to the next one. If the element is mandatory, there's no
  2096. * match and we can return false directly.
  2097. */
  2098. if (instr->type != pat->elems[i].type) {
  2099. if (!pat->elems[i].optional)
  2100. return false;
  2101. continue;
  2102. }
  2103. /*
  2104. * Now check the pattern element constraints. If the pattern is
  2105. * not able to handle the whole instruction in a single step,
  2106. * we have to split it.
  2107. * The last_instr_end_off value comes back updated to point to
  2108. * the position where we have to split the instruction (the
  2109. * start of the next subop chunk).
  2110. */
  2111. if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
  2112. &instr_offset)) {
  2113. ninstrs++;
  2114. i++;
  2115. break;
  2116. }
  2117. instr++;
  2118. ninstrs++;
  2119. instr_offset = 0;
  2120. }
  2121. /*
  2122. * This can happen if all instructions of a pattern are optional.
  2123. * Still, if there's not at least one instruction handled by this
  2124. * pattern, this is not a match, and we should try the next one (if
  2125. * any).
  2126. */
  2127. if (!ninstrs)
  2128. return false;
  2129. /*
  2130. * We had a match on the pattern head, but the pattern may be longer
  2131. * than the instructions we're asked to execute. We need to make sure
  2132. * there's no mandatory elements in the pattern tail.
  2133. */
  2134. for (; i < pat->nelems; i++) {
  2135. if (!pat->elems[i].optional)
  2136. return false;
  2137. }
  2138. /*
  2139. * We have a match: update the subop structure accordingly and return
  2140. * true.
  2141. */
  2142. ctx->subop.ninstrs = ninstrs;
  2143. ctx->subop.last_instr_end_off = instr_offset;
  2144. return true;
  2145. }
  2146. #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
  2147. static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
  2148. {
  2149. const struct nand_op_instr *instr;
  2150. char *prefix = " ";
  2151. unsigned int i;
  2152. pr_debug("executing subop:\n");
  2153. for (i = 0; i < ctx->ninstrs; i++) {
  2154. instr = &ctx->instrs[i];
  2155. if (instr == &ctx->subop.instrs[0])
  2156. prefix = " ->";
  2157. switch (instr->type) {
  2158. case NAND_OP_CMD_INSTR:
  2159. pr_debug("%sCMD [0x%02x]\n", prefix,
  2160. instr->ctx.cmd.opcode);
  2161. break;
  2162. case NAND_OP_ADDR_INSTR:
  2163. pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
  2164. instr->ctx.addr.naddrs,
  2165. instr->ctx.addr.naddrs < 64 ?
  2166. instr->ctx.addr.naddrs : 64,
  2167. instr->ctx.addr.addrs);
  2168. break;
  2169. case NAND_OP_DATA_IN_INSTR:
  2170. pr_debug("%sDATA_IN [%d B%s]\n", prefix,
  2171. instr->ctx.data.len,
  2172. instr->ctx.data.force_8bit ?
  2173. ", force 8-bit" : "");
  2174. break;
  2175. case NAND_OP_DATA_OUT_INSTR:
  2176. pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
  2177. instr->ctx.data.len,
  2178. instr->ctx.data.force_8bit ?
  2179. ", force 8-bit" : "");
  2180. break;
  2181. case NAND_OP_WAITRDY_INSTR:
  2182. pr_debug("%sWAITRDY [max %d ms]\n", prefix,
  2183. instr->ctx.waitrdy.timeout_ms);
  2184. break;
  2185. }
  2186. if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
  2187. prefix = " ";
  2188. }
  2189. }
  2190. #else
  2191. static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
  2192. {
  2193. /* NOP */
  2194. }
  2195. #endif
  2196. /**
  2197. * nand_op_parser_exec_op - exec_op parser
  2198. * @chip: the NAND chip
  2199. * @parser: patterns description provided by the controller driver
  2200. * @op: the NAND operation to address
  2201. * @check_only: when true, the function only checks if @op can be handled but
  2202. * does not execute the operation
  2203. *
  2204. * Helper function designed to ease integration of NAND controller drivers that
  2205. * only support a limited set of instruction sequences. The supported sequences
  2206. * are described in @parser, and the framework takes care of splitting @op into
2207. * multiple sub-operations (if required) and passing them back to the ->exec()
  2208. * callback of the matching pattern if @check_only is set to false.
  2209. *
  2210. * NAND controller drivers should call this function from their own ->exec_op()
  2211. * implementation.
  2212. *
  2213. * Returns 0 on success, a negative error code otherwise. A failure can be
  2214. * caused by an unsupported operation (none of the supported patterns is able
  2215. * to handle the requested operation), or an error returned by one of the
  2216. * matching pattern->exec() hook.
  2217. */
  2218. int nand_op_parser_exec_op(struct nand_chip *chip,
  2219. const struct nand_op_parser *parser,
  2220. const struct nand_operation *op, bool check_only)
  2221. {
  2222. struct nand_op_parser_ctx ctx = {
  2223. .subop.instrs = op->instrs,
  2224. .instrs = op->instrs,
  2225. .ninstrs = op->ninstrs,
  2226. };
  2227. unsigned int i;
  2228. while (ctx.subop.instrs < op->instrs + op->ninstrs) {
  2229. int ret;
  2230. for (i = 0; i < parser->npatterns; i++) {
  2231. const struct nand_op_parser_pattern *pattern;
  2232. pattern = &parser->patterns[i];
  2233. if (!nand_op_parser_match_pat(pattern, &ctx))
  2234. continue;
  2235. nand_op_parser_trace(&ctx);
  2236. if (check_only)
  2237. break;
  2238. ret = pattern->exec(chip, &ctx.subop);
  2239. if (ret)
  2240. return ret;
  2241. break;
  2242. }
  2243. if (i == parser->npatterns) {
  2244. pr_debug("->exec_op() parser: pattern not found!\n");
  2245. return -ENOTSUPP;
  2246. }
  2247. /*
  2248. * Update the context structure by pointing to the start of the
  2249. * next subop.
  2250. */
  2251. ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
  2252. if (ctx.subop.last_instr_end_off)
  2253. ctx.subop.instrs -= 1;
  2254. ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
  2255. }
  2256. return 0;
  2257. }
  2258. EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
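/*
 * Minimal sketch of how a controller driver might use the parser (the
 * 'foo_*' names are invented for the example): the single pattern below
 * declares that the controller can issue an optional command, up to 5
 * address cycles, another optional command, an optional wait and at most
 * 512 bytes of input data in one controller operation.
 *
 *	static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
 *		NAND_OP_PARSER_PATTERN(
 *			foo_exec_subop,
 *			NAND_OP_PARSER_PAT_CMD_ELEM(true),
 *			NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
 *			NAND_OP_PARSER_PAT_CMD_ELEM(true),
 *			NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
 *			NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)));
 *
 *	static int foo_exec_op(struct nand_chip *chip,
 *			       const struct nand_operation *op, bool check_only)
 *	{
 *		return nand_op_parser_exec_op(chip, &foo_op_parser, op,
 *					      check_only);
 *	}
 */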
  2259. static bool nand_instr_is_data(const struct nand_op_instr *instr)
  2260. {
  2261. return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
  2262. instr->type == NAND_OP_DATA_OUT_INSTR);
  2263. }
  2264. static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
  2265. unsigned int instr_idx)
  2266. {
  2267. return subop && instr_idx < subop->ninstrs;
  2268. }
  2269. static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
  2270. unsigned int instr_idx)
  2271. {
  2272. if (instr_idx)
  2273. return 0;
  2274. return subop->first_instr_start_off;
  2275. }
  2276. /**
  2277. * nand_subop_get_addr_start_off - Get the start offset in an address array
  2278. * @subop: The entire sub-operation
  2279. * @instr_idx: Index of the instruction inside the sub-operation
  2280. *
  2281. * During driver development, one could be tempted to directly use the
  2282. * ->addr.addrs field of address instructions. This is wrong as address
  2283. * instructions might be split.
  2284. *
  2285. * Given an address instruction, returns the offset of the first cycle to issue.
  2286. */
  2287. unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
  2288. unsigned int instr_idx)
  2289. {
  2290. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2291. subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
  2292. return 0;
  2293. return nand_subop_get_start_off(subop, instr_idx);
  2294. }
  2295. EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
  2296. /**
  2297. * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
  2298. * @subop: The entire sub-operation
  2299. * @instr_idx: Index of the instruction inside the sub-operation
  2300. *
  2301. * During driver development, one could be tempted to directly use the
2302. * ->addr->naddrs field of an address instruction. This is wrong as instructions
  2303. * might be split.
  2304. *
  2305. * Given an address instruction, returns the number of address cycle to issue.
  2306. */
  2307. unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
  2308. unsigned int instr_idx)
  2309. {
  2310. int start_off, end_off;
  2311. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2312. subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
  2313. return 0;
  2314. start_off = nand_subop_get_addr_start_off(subop, instr_idx);
  2315. if (instr_idx == subop->ninstrs - 1 &&
  2316. subop->last_instr_end_off)
  2317. end_off = subop->last_instr_end_off;
  2318. else
  2319. end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
  2320. return end_off - start_off;
  2321. }
  2322. EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
  2323. /**
  2324. * nand_subop_get_data_start_off - Get the start offset in a data array
  2325. * @subop: The entire sub-operation
  2326. * @instr_idx: Index of the instruction inside the sub-operation
  2327. *
  2328. * During driver development, one could be tempted to directly use the
  2329. * ->data->buf.{in,out} field of data instructions. This is wrong as data
  2330. * instructions might be split.
  2331. *
  2332. * Given a data instruction, returns the offset to start from.
  2333. */
  2334. unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
  2335. unsigned int instr_idx)
  2336. {
  2337. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2338. !nand_instr_is_data(&subop->instrs[instr_idx])))
  2339. return 0;
  2340. return nand_subop_get_start_off(subop, instr_idx);
  2341. }
  2342. EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
  2343. /**
  2344. * nand_subop_get_data_len - Get the number of bytes to retrieve
  2345. * @subop: The entire sub-operation
  2346. * @instr_idx: Index of the instruction inside the sub-operation
  2347. *
  2348. * During driver development, one could be tempted to directly use the
  2349. * ->data->len field of a data instruction. This is wrong as data instructions
  2350. * might be split.
  2351. *
  2352. * Returns the length of the chunk of data to send/receive.
  2353. */
  2354. unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
  2355. unsigned int instr_idx)
  2356. {
  2357. int start_off = 0, end_off;
  2358. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2359. !nand_instr_is_data(&subop->instrs[instr_idx])))
  2360. return 0;
  2361. start_off = nand_subop_get_data_start_off(subop, instr_idx);
  2362. if (instr_idx == subop->ninstrs - 1 &&
  2363. subop->last_instr_end_off)
  2364. end_off = subop->last_instr_end_off;
  2365. else
  2366. end_off = subop->instrs[instr_idx].ctx.data.len;
  2367. return end_off - start_off;
  2368. }
  2369. EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
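/*
 * Sketch of a pattern ->exec() hook using the accessors above (again the
 * 'foo_*' names are invented for the example): a driver must go through
 * these helpers rather than dereferencing the instruction fields directly,
 * because the parser may have split an address or data instruction.
 *
 *	static int foo_exec_subop(struct nand_chip *chip,
 *				  const struct nand_subop *subop)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < subop->ninstrs; i++) {
 *			const struct nand_op_instr *instr = &subop->instrs[i];
 *
 *			if (instr->type == NAND_OP_DATA_IN_INSTR)
 *				foo_read_data(chip,
 *					instr->ctx.data.buf.in +
 *					nand_subop_get_data_start_off(subop, i),
 *					nand_subop_get_data_len(subop, i));
 *		}
 *
 *		return 0;
 *	}
 */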
/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_data_interface for details), do the reset operation, and
 * apply back the previous timings.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
        struct nand_data_interface saved_data_intf = chip->data_interface;
        int ret;

        ret = nand_reset_data_interface(chip, chipnr);
        if (ret)
                return ret;

        /*
         * The CS line has to be released before we can apply the new NAND
         * interface settings, hence this weird ->select_chip() dance.
         */
        chip->select_chip(chip, chipnr);
        ret = nand_reset_op(chip);
        chip->select_chip(chip, -1);
        if (ret)
                return ret;

        /*
         * nand_reset_data_interface() puts both the NAND chip and the NAND
         * controller in timings mode 0. If the default mode for this chip is
         * also 0, there is no need to apply the change again. Plus, at probe
         * time, nand_setup_data_interface() uses ->set/get_features() which
         * would fail anyway as the parameter page is not available yet.
         */
        if (!chip->onfi_timing_mode_default)
                return 0;

        chip->data_interface = saved_data_intf;
        ret = nand_setup_data_interface(chip, chipnr);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
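
/*
 * Illustrative caller (not taken from this file): a probe or resume path
 * typically resets every die before issuing further commands. chip->numchips
 * is assumed to be already populated:
 *
 *	static int my_reset_all_dies(struct nand_chip *chip)
 *	{
 *		int i, ret;
 *
 *		for (i = 0; i < chip->numchips; i++) {
 *			ret = nand_reset(chip, i);
 *			if (ret)
 *				return ret;
 *		}
 *
 *		return 0;
 *	}
 */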
/**
 * nand_get_features - wrapper to perform a GET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four-byte array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_get_features(struct nand_chip *chip, int addr,
                      u8 *subfeature_param)
{
        if (!nand_supports_get_features(chip, addr))
                return -ENOTSUPP;

        if (chip->legacy.get_features)
                return chip->legacy.get_features(chip, addr, subfeature_param);

        return nand_get_features_op(chip, addr, subfeature_param);
}
EXPORT_SYMBOL_GPL(nand_get_features);

/**
 * nand_set_features - wrapper to perform a SET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four-byte array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_set_features(struct nand_chip *chip, int addr,
                      u8 *subfeature_param)
{
        if (!nand_supports_set_features(chip, addr))
                return -ENOTSUPP;

        if (chip->legacy.set_features)
                return chip->legacy.set_features(chip, addr, subfeature_param);

        return nand_set_features_op(chip, addr, subfeature_param);
}
EXPORT_SYMBOL_GPL(nand_set_features);
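
/*
 * Illustrative use (sketch only, not taken from this file): selecting an
 * ONFI timing mode through the feature wrappers and reading it back.
 * ONFI_FEATURE_ADDR_TIMING_MODE and ONFI_SUBFEATURE_PARAM_LEN come from the
 * ONFI definitions; "mode" is assumed to be a timing mode the chip supports:
 *
 *	static int my_apply_timing_mode(struct nand_chip *chip, u8 mode)
 *	{
 *		u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };
 *		int ret;
 *
 *		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
 *					param);
 *		if (ret)
 *			return ret;
 *
 *		ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
 *					param);
 *		if (ret)
 *			return ret;
 *
 *		return param[0] == mode ? 0 : -EIO;
 *	}
 */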
/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a buffer contains only 0xff, which means the underlying region
 * has been erased and is ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region not erased.
 * Note: The logic of this function has been extracted from the memweight
 * implementation, except that the nand_check_erased_buf function exits before
 * testing the whole buffer if the number of bitflips exceeds the
 * bitflips_threshold value.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold.
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
        const unsigned char *bitmap = buf;
        int bitflips = 0;
        int weight;

        for (; len && ((uintptr_t)bitmap) % sizeof(long);
             len--, bitmap++) {
                weight = hweight8(*bitmap);
                bitflips += BITS_PER_BYTE - weight;
                if (unlikely(bitflips > bitflips_threshold))
                        return -EBADMSG;
        }

        for (; len >= sizeof(long);
             len -= sizeof(long), bitmap += sizeof(long)) {
                unsigned long d = *((unsigned long *)bitmap);
                if (d == ~0UL)
                        continue;
                weight = hweight_long(d);
                bitflips += BITS_PER_LONG - weight;
                if (unlikely(bitflips > bitflips_threshold))
                        return -EBADMSG;
        }

        for (; len > 0; len--, bitmap++) {
                weight = hweight8(*bitmap);
                bitflips += BITS_PER_BYTE - weight;
                if (unlikely(bitflips > bitflips_threshold))
                        return -EBADMSG;
        }

        return bitflips;
}
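
/*
 * The counting scheme above is just "count zero bits, bail out early": an
 * unaligned byte prologue, a long-word main loop that skips all-ones words,
 * and a byte epilogue. A minimal user-space sketch of the same idea
 * (illustrative only, using the compiler popcount builtin instead of the
 * kernel hweight helpers):
 *
 *	static int count_zero_bits(const unsigned char *p, size_t len,
 *				   int threshold)
 *	{
 *		int flips = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			flips += 8 - __builtin_popcount(p[i]);
 *			if (flips > threshold)
 *				return -1;	// too many bitflips
 *		}
 *
 *		return flips;
 *	}
 */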
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contain only
 * the 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms work on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines
 *    report the number of errors per chunk, and the NAND core infrastructure
 *    expects you to return the maximum number of bitflips for the whole page.
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also the associated ECC data, because a user might
 *    have programmed all but a few bits to 1. In this case, we shouldn't
 *    consider the chunk as erased, and checking the ECC bytes prevents this
 *    case.
 * 3/ The extraoob argument is optional, and should be used if some of your
 *    OOB data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
                                void *ecc, int ecclen,
                                void *extraoob, int extraooblen,
                                int bitflips_threshold)
{
        int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

        data_bitflips = nand_check_erased_buf(data, datalen,
                                              bitflips_threshold);
        if (data_bitflips < 0)
                return data_bitflips;

        bitflips_threshold -= data_bitflips;

        ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
        if (ecc_bitflips < 0)
                return ecc_bitflips;

        bitflips_threshold -= ecc_bitflips;

        extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
                                                  bitflips_threshold);
        if (extraoob_bitflips < 0)
                return extraoob_bitflips;

        if (data_bitflips)
                memset(data, 0xff, datalen);

        if (ecc_bitflips)
                memset(ecc, 0xff, ecclen);

        if (extraoob_bitflips)
                memset(extraoob, 0xff, extraooblen);

        return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
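
/*
 * Typical use, mirroring the read_page() implementations below: when the ECC
 * engine reports an uncorrectable chunk, re-check it as "possibly erased"
 * and fold the result into max_bitflips. Sketch only; my_ecc_correct() is a
 * hypothetical per-chunk corrector, and data/ecc_code describe one chunk:
 *
 *	stat = my_ecc_correct(chip, data, ecc_code);
 *	if (stat == -EBADMSG)
 *		stat = nand_check_erased_ecc_chunk(data, chip->ecc.size,
 *						   ecc_code, chip->ecc.bytes,
 *						   NULL, 0,
 *						   chip->ecc.strength);
 *	if (stat < 0)
 *		mtd->ecc_stats.failed++;
 *	else
 *		max_bitflips = max_t(unsigned int, max_bitflips, stat);
 */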
/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
                               int oob_required, int page)
{
        return -ENOTSUPP;
}
EXPORT_SYMBOL(nand_read_page_raw_notsupp);
/**
 * nand_read_page_raw - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
                       int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
        if (ret)
                return ret;

        if (oob_required) {
                ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
                                        false);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 */
  2617. static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
  2618. int oob_required, int page)
  2619. {
  2620. struct mtd_info *mtd = nand_to_mtd(chip);
  2621. int eccsize = chip->ecc.size;
  2622. int eccbytes = chip->ecc.bytes;
  2623. uint8_t *oob = chip->oob_poi;
  2624. int steps, size, ret;
  2625. ret = nand_read_page_op(chip, page, 0, NULL, 0);
  2626. if (ret)
  2627. return ret;
  2628. for (steps = chip->ecc.steps; steps > 0; steps--) {
  2629. ret = nand_read_data_op(chip, buf, eccsize, false);
  2630. if (ret)
  2631. return ret;
  2632. buf += eccsize;
  2633. if (chip->ecc.prepad) {
  2634. ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
  2635. false);
  2636. if (ret)
  2637. return ret;
  2638. oob += chip->ecc.prepad;
  2639. }
  2640. ret = nand_read_data_op(chip, oob, eccbytes, false);
  2641. if (ret)
  2642. return ret;
  2643. oob += eccbytes;
  2644. if (chip->ecc.postpad) {
  2645. ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
  2646. false);
  2647. if (ret)
  2648. return ret;
  2649. oob += chip->ecc.postpad;
  2650. }
  2651. }
  2652. size = mtd->oobsize - (oob - chip->oob_poi);
  2653. if (size) {
  2654. ret = nand_read_data_op(chip, oob, size, false);
  2655. if (ret)
  2656. return ret;
  2657. }
  2658. return 0;
  2659. }
/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 */
  2667. static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
  2668. int oob_required, int page)
  2669. {
  2670. struct mtd_info *mtd = nand_to_mtd(chip);
  2671. int i, eccsize = chip->ecc.size, ret;
  2672. int eccbytes = chip->ecc.bytes;
  2673. int eccsteps = chip->ecc.steps;
  2674. uint8_t *p = buf;
  2675. uint8_t *ecc_calc = chip->ecc.calc_buf;
  2676. uint8_t *ecc_code = chip->ecc.code_buf;
  2677. unsigned int max_bitflips = 0;
  2678. chip->ecc.read_page_raw(chip, buf, 1, page);
  2679. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
  2680. chip->ecc.calculate(chip, p, &ecc_calc[i]);
  2681. ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
  2682. chip->ecc.total);
  2683. if (ret)
  2684. return ret;
  2685. eccsteps = chip->ecc.steps;
  2686. p = buf;
  2687. for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  2688. int stat;
  2689. stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
  2690. if (stat < 0) {
  2691. mtd->ecc_stats.failed++;
  2692. } else {
  2693. mtd->ecc_stats.corrected += stat;
  2694. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  2695. }
  2696. }
  2697. return max_bitflips;
  2698. }
/**
 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
 * @chip: nand chip info structure
 * @data_offs: offset of requested data within the page
 * @readlen: data length
 * @bufpoi: buffer to store read data
 * @page: page number to read
 */
  2707. static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
  2708. uint32_t readlen, uint8_t *bufpoi, int page)
  2709. {
  2710. struct mtd_info *mtd = nand_to_mtd(chip);
  2711. int start_step, end_step, num_steps, ret;
  2712. uint8_t *p;
  2713. int data_col_addr, i, gaps = 0;
  2714. int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
  2715. int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
  2716. int index, section = 0;
  2717. unsigned int max_bitflips = 0;
  2718. struct mtd_oob_region oobregion = { };
  2719. /* Column address within the page aligned to ECC size (256bytes) */
  2720. start_step = data_offs / chip->ecc.size;
  2721. end_step = (data_offs + readlen - 1) / chip->ecc.size;
  2722. num_steps = end_step - start_step + 1;
  2723. index = start_step * chip->ecc.bytes;
  2724. /* Data size aligned to ECC ecc.size */
  2725. datafrag_len = num_steps * chip->ecc.size;
  2726. eccfrag_len = num_steps * chip->ecc.bytes;
  2727. data_col_addr = start_step * chip->ecc.size;
  2728. /* If we read not a page aligned data */
  2729. p = bufpoi + data_col_addr;
  2730. ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
  2731. if (ret)
  2732. return ret;
  2733. /* Calculate ECC */
  2734. for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
  2735. chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
  2736. /*
  2737. * The performance is faster if we position offsets according to
  2738. * ecc.pos. Let's make sure that there are no gaps in ECC positions.
  2739. */
  2740. ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
  2741. if (ret)
  2742. return ret;
  2743. if (oobregion.length < eccfrag_len)
  2744. gaps = 1;
  2745. if (gaps) {
  2746. ret = nand_change_read_column_op(chip, mtd->writesize,
  2747. chip->oob_poi, mtd->oobsize,
  2748. false);
  2749. if (ret)
  2750. return ret;
  2751. } else {
  2752. /*
  2753. * Send the command to read the particular ECC bytes take care
  2754. * about buswidth alignment in read_buf.
  2755. */
  2756. aligned_pos = oobregion.offset & ~(busw - 1);
  2757. aligned_len = eccfrag_len;
  2758. if (oobregion.offset & (busw - 1))
  2759. aligned_len++;
  2760. if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
  2761. (busw - 1))
  2762. aligned_len++;
  2763. ret = nand_change_read_column_op(chip,
  2764. mtd->writesize + aligned_pos,
  2765. &chip->oob_poi[aligned_pos],
  2766. aligned_len, false);
  2767. if (ret)
  2768. return ret;
  2769. }
  2770. ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
  2771. chip->oob_poi, index, eccfrag_len);
  2772. if (ret)
  2773. return ret;
  2774. p = bufpoi + data_col_addr;
  2775. for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
  2776. int stat;
  2777. stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
  2778. &chip->ecc.calc_buf[i]);
  2779. if (stat == -EBADMSG &&
  2780. (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
  2781. /* check for empty pages with bitflips */
  2782. stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
  2783. &chip->ecc.code_buf[i],
  2784. chip->ecc.bytes,
  2785. NULL, 0,
  2786. chip->ecc.strength);
  2787. }
  2788. if (stat < 0) {
  2789. mtd->ecc_stats.failed++;
  2790. } else {
  2791. mtd->ecc_stats.corrected += stat;
  2792. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  2793. }
  2794. }
  2795. return max_bitflips;
  2796. }
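
/*
 * Worked example for the step arithmetic above (illustrative numbers): with
 * chip->ecc.size = 512 and chip->ecc.bytes = 7, a request for
 * data_offs = 700 and readlen = 1000 gives
 *
 *	start_step = 700 / 512              = 1
 *	end_step   = (700 + 1000 - 1) / 512 = 3
 *	num_steps  = 3 - 1 + 1              = 3
 *	datafrag_len = 3 * 512 = 1536, eccfrag_len = 3 * 7 = 21
 *	data_col_addr = 1 * 512 = 512, index = 1 * 7 = 7
 *
 * i.e. the read is widened to whole ECC steps, and the matching slice of ECC
 * bytes is fetched starting at byte 7 of the ECC area.
 */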
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob layout.
 */
  2806. static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
  2807. int oob_required, int page)
  2808. {
  2809. struct mtd_info *mtd = nand_to_mtd(chip);
  2810. int i, eccsize = chip->ecc.size, ret;
  2811. int eccbytes = chip->ecc.bytes;
  2812. int eccsteps = chip->ecc.steps;
  2813. uint8_t *p = buf;
  2814. uint8_t *ecc_calc = chip->ecc.calc_buf;
  2815. uint8_t *ecc_code = chip->ecc.code_buf;
  2816. unsigned int max_bitflips = 0;
  2817. ret = nand_read_page_op(chip, page, 0, NULL, 0);
  2818. if (ret)
  2819. return ret;
  2820. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  2821. chip->ecc.hwctl(chip, NAND_ECC_READ);
  2822. ret = nand_read_data_op(chip, p, eccsize, false);
  2823. if (ret)
  2824. return ret;
  2825. chip->ecc.calculate(chip, p, &ecc_calc[i]);
  2826. }
  2827. ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
  2828. if (ret)
  2829. return ret;
  2830. ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
  2831. chip->ecc.total);
  2832. if (ret)
  2833. return ret;
  2834. eccsteps = chip->ecc.steps;
  2835. p = buf;
  2836. for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  2837. int stat;
  2838. stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
  2839. if (stat == -EBADMSG &&
  2840. (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
  2841. /* check for empty pages with bitflips */
  2842. stat = nand_check_erased_ecc_chunk(p, eccsize,
  2843. &ecc_code[i], eccbytes,
  2844. NULL, 0,
  2845. chip->ecc.strength);
  2846. }
  2847. if (stat < 0) {
  2848. mtd->ecc_stats.failed++;
  2849. } else {
  2850. mtd->ecc_stats.corrected += stat;
  2851. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  2852. }
  2853. }
  2854. return max_bitflips;
  2855. }
/**
 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, which requires the OOB to be read first.
 * For this ECC mode, the write_page method is re-used from ECC_HW. These
 * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
 * support, which, with multiple ECC steps, follows the "infix ECC" scheme and
 * reads/writes ECC from the data area, overwriting the NAND manufacturer bad
 * block markings.
 */
  2869. static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
  2870. int oob_required, int page)
  2871. {
  2872. struct mtd_info *mtd = nand_to_mtd(chip);
  2873. int i, eccsize = chip->ecc.size, ret;
  2874. int eccbytes = chip->ecc.bytes;
  2875. int eccsteps = chip->ecc.steps;
  2876. uint8_t *p = buf;
  2877. uint8_t *ecc_code = chip->ecc.code_buf;
  2878. uint8_t *ecc_calc = chip->ecc.calc_buf;
  2879. unsigned int max_bitflips = 0;
  2880. /* Read the OOB area first */
  2881. ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
  2882. if (ret)
  2883. return ret;
  2884. ret = nand_read_page_op(chip, page, 0, NULL, 0);
  2885. if (ret)
  2886. return ret;
  2887. ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
  2888. chip->ecc.total);
  2889. if (ret)
  2890. return ret;
  2891. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  2892. int stat;
  2893. chip->ecc.hwctl(chip, NAND_ECC_READ);
  2894. ret = nand_read_data_op(chip, p, eccsize, false);
  2895. if (ret)
  2896. return ret;
  2897. chip->ecc.calculate(chip, p, &ecc_calc[i]);
  2898. stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
  2899. if (stat == -EBADMSG &&
  2900. (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
  2901. /* check for empty pages with bitflips */
  2902. stat = nand_check_erased_ecc_chunk(p, eccsize,
  2903. &ecc_code[i], eccbytes,
  2904. NULL, 0,
  2905. chip->ecc.strength);
  2906. }
  2907. if (stat < 0) {
  2908. mtd->ecc_stats.failed++;
  2909. } else {
  2910. mtd->ecc_stats.corrected += stat;
  2911. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  2912. }
  2913. }
  2914. return max_bitflips;
  2915. }
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
  2926. static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
  2927. int oob_required, int page)
  2928. {
  2929. struct mtd_info *mtd = nand_to_mtd(chip);
  2930. int ret, i, eccsize = chip->ecc.size;
  2931. int eccbytes = chip->ecc.bytes;
  2932. int eccsteps = chip->ecc.steps;
  2933. int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
  2934. uint8_t *p = buf;
  2935. uint8_t *oob = chip->oob_poi;
  2936. unsigned int max_bitflips = 0;
  2937. ret = nand_read_page_op(chip, page, 0, NULL, 0);
  2938. if (ret)
  2939. return ret;
  2940. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  2941. int stat;
  2942. chip->ecc.hwctl(chip, NAND_ECC_READ);
  2943. ret = nand_read_data_op(chip, p, eccsize, false);
  2944. if (ret)
  2945. return ret;
  2946. if (chip->ecc.prepad) {
  2947. ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
  2948. false);
  2949. if (ret)
  2950. return ret;
  2951. oob += chip->ecc.prepad;
  2952. }
  2953. chip->ecc.hwctl(chip, NAND_ECC_READSYN);
  2954. ret = nand_read_data_op(chip, oob, eccbytes, false);
  2955. if (ret)
  2956. return ret;
  2957. stat = chip->ecc.correct(chip, p, oob, NULL);
  2958. oob += eccbytes;
  2959. if (chip->ecc.postpad) {
  2960. ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
  2961. false);
  2962. if (ret)
  2963. return ret;
  2964. oob += chip->ecc.postpad;
  2965. }
  2966. if (stat == -EBADMSG &&
  2967. (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
  2968. /* check for empty pages with bitflips */
  2969. stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
  2970. oob - eccpadbytes,
  2971. eccpadbytes,
  2972. NULL, 0,
  2973. chip->ecc.strength);
  2974. }
  2975. if (stat < 0) {
  2976. mtd->ecc_stats.failed++;
  2977. } else {
  2978. mtd->ecc_stats.corrected += stat;
  2979. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  2980. }
  2981. }
  2982. /* Calculate remaining oob bytes */
  2983. i = mtd->oobsize - (oob - chip->oob_poi);
  2984. if (i) {
  2985. ret = nand_read_data_op(chip, oob, i, false);
  2986. if (ret)
  2987. return ret;
  2988. }
  2989. return max_bitflips;
  2990. }
/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @mtd: mtd info structure
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 */
static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
                                  struct mtd_oob_ops *ops, size_t len)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        int ret;

        switch (ops->mode) {
        case MTD_OPS_PLACE_OOB:
        case MTD_OPS_RAW:
                memcpy(oob, chip->oob_poi + ops->ooboffs, len);
                return oob + len;

        case MTD_OPS_AUTO_OOB:
                ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
                                                  ops->ooboffs, len);
                BUG_ON(ret);
                return oob + len;

        default:
                BUG();
        }
        return NULL;
}
/**
 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
 * @chip: NAND chip object
 * @retry_mode: the retry mode to use
 *
 * Some vendors supply a special command to shift the Vt threshold, to be used
 * when there are too many bitflips in a page (i.e., ECC error). After setting
 * a new threshold, the host should retry reading the page.
 */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
        pr_debug("setting READ RETRY mode %d\n", retry_mode);

        if (retry_mode >= chip->read_retries)
                return -EINVAL;

        if (!chip->setup_read_retry)
                return -EOPNOTSUPP;

        return chip->setup_read_retry(chip, retry_mode);
}
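
/*
 * A vendor hook behind chip->setup_read_retry typically just programs a
 * vendor feature register. Sketch of a Micron-style implementation
 * (illustrative only; the real code lives in the vendor driver and may
 * differ):
 *
 *	static int my_setup_read_retry(struct nand_chip *chip, int retry_mode)
 *	{
 *		u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { retry_mode, };
 *
 *		return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY,
 *					 feature);
 *	}
 */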
static void nand_wait_readrdy(struct nand_chip *chip)
{
        const struct nand_sdr_timings *sdr;

        if (!(chip->options & NAND_NEED_READRDY))
                return;

        sdr = nand_get_sdr_timings(&chip->data_interface);
        WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
}
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 */
  3052. static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
  3053. struct mtd_oob_ops *ops)
  3054. {
  3055. int chipnr, page, realpage, col, bytes, aligned, oob_required;
  3056. struct nand_chip *chip = mtd_to_nand(mtd);
  3057. int ret = 0;
  3058. uint32_t readlen = ops->len;
  3059. uint32_t oobreadlen = ops->ooblen;
  3060. uint32_t max_oobsize = mtd_oobavail(mtd, ops);
  3061. uint8_t *bufpoi, *oob, *buf;
  3062. int use_bufpoi;
  3063. unsigned int max_bitflips = 0;
  3064. int retry_mode = 0;
  3065. bool ecc_fail = false;
  3066. chipnr = (int)(from >> chip->chip_shift);
  3067. chip->select_chip(chip, chipnr);
  3068. realpage = (int)(from >> chip->page_shift);
  3069. page = realpage & chip->pagemask;
  3070. col = (int)(from & (mtd->writesize - 1));
  3071. buf = ops->datbuf;
  3072. oob = ops->oobbuf;
  3073. oob_required = oob ? 1 : 0;
  3074. while (1) {
  3075. unsigned int ecc_failures = mtd->ecc_stats.failed;
  3076. bytes = min(mtd->writesize - col, readlen);
  3077. aligned = (bytes == mtd->writesize);
  3078. if (!aligned)
  3079. use_bufpoi = 1;
  3080. else if (chip->options & NAND_USE_BOUNCE_BUFFER)
  3081. use_bufpoi = !virt_addr_valid(buf) ||
  3082. !IS_ALIGNED((unsigned long)buf,
  3083. chip->buf_align);
  3084. else
  3085. use_bufpoi = 0;
  3086. /* Is the current page in the buffer? */
  3087. if (realpage != chip->pagebuf || oob) {
  3088. bufpoi = use_bufpoi ? chip->data_buf : buf;
  3089. if (use_bufpoi && aligned)
  3090. pr_debug("%s: using read bounce buffer for buf@%p\n",
  3091. __func__, buf);
  3092. read_retry:
  3093. /*
  3094. * Now read the page into the buffer. Absent an error,
  3095. * the read methods return max bitflips per ecc step.
  3096. */
  3097. if (unlikely(ops->mode == MTD_OPS_RAW))
  3098. ret = chip->ecc.read_page_raw(chip, bufpoi,
  3099. oob_required,
  3100. page);
  3101. else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
  3102. !oob)
  3103. ret = chip->ecc.read_subpage(chip, col, bytes,
  3104. bufpoi, page);
  3105. else
  3106. ret = chip->ecc.read_page(chip, bufpoi,
  3107. oob_required, page);
  3108. if (ret < 0) {
  3109. if (use_bufpoi)
  3110. /* Invalidate page cache */
  3111. chip->pagebuf = -1;
  3112. break;
  3113. }
  3114. /* Transfer not aligned data */
  3115. if (use_bufpoi) {
  3116. if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
  3117. !(mtd->ecc_stats.failed - ecc_failures) &&
  3118. (ops->mode != MTD_OPS_RAW)) {
  3119. chip->pagebuf = realpage;
  3120. chip->pagebuf_bitflips = ret;
  3121. } else {
  3122. /* Invalidate page cache */
  3123. chip->pagebuf = -1;
  3124. }
  3125. memcpy(buf, chip->data_buf + col, bytes);
  3126. }
  3127. if (unlikely(oob)) {
  3128. int toread = min(oobreadlen, max_oobsize);
  3129. if (toread) {
  3130. oob = nand_transfer_oob(mtd,
  3131. oob, ops, toread);
  3132. oobreadlen -= toread;
  3133. }
  3134. }
  3135. nand_wait_readrdy(chip);
  3136. if (mtd->ecc_stats.failed - ecc_failures) {
  3137. if (retry_mode + 1 < chip->read_retries) {
  3138. retry_mode++;
  3139. ret = nand_setup_read_retry(chip,
  3140. retry_mode);
  3141. if (ret < 0)
  3142. break;
  3143. /* Reset failures; retry */
  3144. mtd->ecc_stats.failed = ecc_failures;
  3145. goto read_retry;
  3146. } else {
  3147. /* No more retry modes; real failure */
  3148. ecc_fail = true;
  3149. }
  3150. }
  3151. buf += bytes;
  3152. max_bitflips = max_t(unsigned int, max_bitflips, ret);
  3153. } else {
  3154. memcpy(buf, chip->data_buf + col, bytes);
  3155. buf += bytes;
  3156. max_bitflips = max_t(unsigned int, max_bitflips,
  3157. chip->pagebuf_bitflips);
  3158. }
  3159. readlen -= bytes;
  3160. /* Reset to retry mode 0 */
  3161. if (retry_mode) {
  3162. ret = nand_setup_read_retry(chip, 0);
  3163. if (ret < 0)
  3164. break;
  3165. retry_mode = 0;
  3166. }
  3167. if (!readlen)
  3168. break;
  3169. /* For subsequent reads align to page boundary */
  3170. col = 0;
  3171. /* Increment page address */
  3172. realpage++;
  3173. page = realpage & chip->pagemask;
  3174. /* Check, if we cross a chip boundary */
  3175. if (!page) {
  3176. chipnr++;
  3177. chip->select_chip(chip, -1);
  3178. chip->select_chip(chip, chipnr);
  3179. }
  3180. }
  3181. chip->select_chip(chip, -1);
  3182. ops->retlen = ops->len - (size_t) readlen;
  3183. if (oob)
  3184. ops->oobretlen = ops->ooblen - oobreadlen;
  3185. if (ret < 0)
  3186. return ret;
  3187. if (ecc_fail)
  3188. return -EBADMSG;
  3189. return max_bitflips;
  3190. }
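
/*
 * At the MTD API level the max_bitflips value returned here is not seen
 * directly: mtdcore converts it into -EUCLEAN once it reaches
 * mtd->bitflip_threshold. A typical caller therefore looks like this
 * (illustrative only; schedule_block_scrubbing() is a hypothetical helper):
 *
 *	ret = mtd_read(mtd, from, len, &retlen, buf);
 *	if (ret == -EUCLEAN)
 *		schedule_block_scrubbing();
 *	else if (ret < 0)
 *		return ret;		// -EBADMSG, -EIO, ...
 */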
/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @chip: nand chip info structure
 * @page: page number to read
 */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @chip: nand chip info structure
 * @page: page number to read
 */
  3208. int nand_read_oob_syndrome(struct nand_chip *chip, int page)
  3209. {
  3210. struct mtd_info *mtd = nand_to_mtd(chip);
  3211. int length = mtd->oobsize;
  3212. int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
  3213. int eccsize = chip->ecc.size;
  3214. uint8_t *bufpoi = chip->oob_poi;
  3215. int i, toread, sndrnd = 0, pos, ret;
  3216. ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
  3217. if (ret)
  3218. return ret;
  3219. for (i = 0; i < chip->ecc.steps; i++) {
  3220. if (sndrnd) {
  3221. int ret;
  3222. pos = eccsize + i * (eccsize + chunk);
  3223. if (mtd->writesize > 512)
  3224. ret = nand_change_read_column_op(chip, pos,
  3225. NULL, 0,
  3226. false);
  3227. else
  3228. ret = nand_read_page_op(chip, page, pos, NULL,
  3229. 0);
  3230. if (ret)
  3231. return ret;
  3232. } else
  3233. sndrnd = 1;
  3234. toread = min_t(int, length, chunk);
  3235. ret = nand_read_data_op(chip, bufpoi, toread, false);
  3236. if (ret)
  3237. return ret;
  3238. bufpoi += toread;
  3239. length -= toread;
  3240. }
  3241. if (length > 0) {
  3242. ret = nand_read_data_op(chip, bufpoi, length, false);
  3243. if (ret)
  3244. return ret;
  3245. }
  3246. return 0;
  3247. }
  3248. EXPORT_SYMBOL(nand_read_oob_syndrome);
/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @chip: nand chip info structure
 * @page: page number to write
 */
int nand_write_oob_std(struct nand_chip *chip, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
                                 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 */
  3267. int nand_write_oob_syndrome(struct nand_chip *chip, int page)
  3268. {
  3269. struct mtd_info *mtd = nand_to_mtd(chip);
  3270. int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
  3271. int eccsize = chip->ecc.size, length = mtd->oobsize;
  3272. int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
  3273. const uint8_t *bufpoi = chip->oob_poi;
  3274. /*
  3275. * data-ecc-data-ecc ... ecc-oob
  3276. * or
  3277. * data-pad-ecc-pad-data-pad .... ecc-pad-oob
  3278. */
  3279. if (!chip->ecc.prepad && !chip->ecc.postpad) {
  3280. pos = steps * (eccsize + chunk);
  3281. steps = 0;
  3282. } else
  3283. pos = eccsize;
  3284. ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
  3285. if (ret)
  3286. return ret;
  3287. for (i = 0; i < steps; i++) {
  3288. if (sndcmd) {
  3289. if (mtd->writesize <= 512) {
  3290. uint32_t fill = 0xFFFFFFFF;
  3291. len = eccsize;
  3292. while (len > 0) {
  3293. int num = min_t(int, len, 4);
  3294. ret = nand_write_data_op(chip, &fill,
  3295. num, false);
  3296. if (ret)
  3297. return ret;
  3298. len -= num;
  3299. }
  3300. } else {
  3301. pos = eccsize + i * (eccsize + chunk);
  3302. ret = nand_change_write_column_op(chip, pos,
  3303. NULL, 0,
  3304. false);
  3305. if (ret)
  3306. return ret;
  3307. }
  3308. } else
  3309. sndcmd = 1;
  3310. len = min_t(int, length, chunk);
  3311. ret = nand_write_data_op(chip, bufpoi, len, false);
  3312. if (ret)
  3313. return ret;
  3314. bufpoi += len;
  3315. length -= len;
  3316. }
  3317. if (length > 0) {
  3318. ret = nand_write_data_op(chip, bufpoi, length, false);
  3319. if (ret)
  3320. return ret;
  3321. }
  3322. return nand_prog_page_end_op(chip);
  3323. }
  3324. EXPORT_SYMBOL(nand_write_oob_syndrome);
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 */
  3333. static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
  3334. struct mtd_oob_ops *ops)
  3335. {
  3336. unsigned int max_bitflips = 0;
  3337. int page, realpage, chipnr;
  3338. struct nand_chip *chip = mtd_to_nand(mtd);
  3339. struct mtd_ecc_stats stats;
  3340. int readlen = ops->ooblen;
  3341. int len;
  3342. uint8_t *buf = ops->oobbuf;
  3343. int ret = 0;
  3344. pr_debug("%s: from = 0x%08Lx, len = %i\n",
  3345. __func__, (unsigned long long)from, readlen);
  3346. stats = mtd->ecc_stats;
  3347. len = mtd_oobavail(mtd, ops);
  3348. chipnr = (int)(from >> chip->chip_shift);
  3349. chip->select_chip(chip, chipnr);
  3350. /* Shift to get page */
  3351. realpage = (int)(from >> chip->page_shift);
  3352. page = realpage & chip->pagemask;
  3353. while (1) {
  3354. if (ops->mode == MTD_OPS_RAW)
  3355. ret = chip->ecc.read_oob_raw(chip, page);
  3356. else
  3357. ret = chip->ecc.read_oob(chip, page);
  3358. if (ret < 0)
  3359. break;
  3360. len = min(len, readlen);
  3361. buf = nand_transfer_oob(mtd, buf, ops, len);
  3362. nand_wait_readrdy(chip);
  3363. max_bitflips = max_t(unsigned int, max_bitflips, ret);
  3364. readlen -= len;
  3365. if (!readlen)
  3366. break;
  3367. /* Increment page address */
  3368. realpage++;
  3369. page = realpage & chip->pagemask;
  3370. /* Check, if we cross a chip boundary */
  3371. if (!page) {
  3372. chipnr++;
  3373. chip->select_chip(chip, -1);
  3374. chip->select_chip(chip, chipnr);
  3375. }
  3376. }
  3377. chip->select_chip(chip, -1);
  3378. ops->oobretlen = ops->ooblen - readlen;
  3379. if (ret < 0)
  3380. return ret;
  3381. if (mtd->ecc_stats.failed - stats.failed)
  3382. return -EBADMSG;
  3383. return max_bitflips;
  3384. }
/**
 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * NAND read data and/or out-of-band data.
 */
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
                         struct mtd_oob_ops *ops)
{
        int ret;

        ops->retlen = 0;

        if (ops->mode != MTD_OPS_PLACE_OOB &&
            ops->mode != MTD_OPS_AUTO_OOB &&
            ops->mode != MTD_OPS_RAW)
                return -ENOTSUPP;

        nand_get_device(mtd, FL_READING);

        if (!ops->datbuf)
                ret = nand_do_read_oob(mtd, from, ops);
        else
                ret = nand_do_read_ops(mtd, from, ops);

        nand_release_device(mtd);
        return ret;
}
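
/*
 * The MTD-level entry point for this path is mtd_read_oob(). A client
 * reading one page of data plus its free OOB bytes might do (sketch only;
 * databuf/oobbuf are assumed to be allocated by the caller):
 *
 *	struct mtd_oob_ops ops = { };
 *
 *	ops.mode   = MTD_OPS_AUTO_OOB;
 *	ops.datbuf = databuf;
 *	ops.len    = mtd->writesize;
 *	ops.oobbuf = oobbuf;
 *	ops.ooblen = mtd->oobavail;
 *
 *	ret = mtd_read_oob(mtd, page_offs, &ops);
 */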
/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
                                int oob_required, int page)
{
        return -ENOTSUPP;
}
EXPORT_SYMBOL(nand_write_page_raw_notsupp);
/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
                        int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
        if (ret)
                return ret;

        if (oob_required) {
                ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
                                         false);
                if (ret)
                        return ret;
        }

        return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 */
  3460. static int nand_write_page_raw_syndrome(struct nand_chip *chip,
  3461. const uint8_t *buf, int oob_required,
  3462. int page)
  3463. {
  3464. struct mtd_info *mtd = nand_to_mtd(chip);
  3465. int eccsize = chip->ecc.size;
  3466. int eccbytes = chip->ecc.bytes;
  3467. uint8_t *oob = chip->oob_poi;
  3468. int steps, size, ret;
  3469. ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
  3470. if (ret)
  3471. return ret;
  3472. for (steps = chip->ecc.steps; steps > 0; steps--) {
  3473. ret = nand_write_data_op(chip, buf, eccsize, false);
  3474. if (ret)
  3475. return ret;
  3476. buf += eccsize;
  3477. if (chip->ecc.prepad) {
  3478. ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
  3479. false);
  3480. if (ret)
  3481. return ret;
  3482. oob += chip->ecc.prepad;
  3483. }
  3484. ret = nand_write_data_op(chip, oob, eccbytes, false);
  3485. if (ret)
  3486. return ret;
  3487. oob += eccbytes;
  3488. if (chip->ecc.postpad) {
  3489. ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
  3490. false);
  3491. if (ret)
  3492. return ret;
  3493. oob += chip->ecc.postpad;
  3494. }
  3495. }
  3496. size = mtd->oobsize - (oob - chip->oob_poi);
  3497. if (size) {
  3498. ret = nand_write_data_op(chip, oob, size, false);
  3499. if (ret)
  3500. return ret;
  3501. }
  3502. return nand_prog_page_end_op(chip);
  3503. }
/**
 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
                                 int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int i, eccsize = chip->ecc.size, ret;
        int eccbytes = chip->ecc.bytes;
        int eccsteps = chip->ecc.steps;
        uint8_t *ecc_calc = chip->ecc.calc_buf;
        const uint8_t *p = buf;

        /* Software ECC calculation */
        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
                chip->ecc.calculate(chip, p, &ecc_calc[i]);

        ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
                                         chip->ecc.total);
        if (ret)
                return ret;

        return chip->ecc.write_page_raw(chip, buf, 1, page);
}
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
  3536. static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
  3537. int oob_required, int page)
  3538. {
  3539. struct mtd_info *mtd = nand_to_mtd(chip);
  3540. int i, eccsize = chip->ecc.size, ret;
  3541. int eccbytes = chip->ecc.bytes;
  3542. int eccsteps = chip->ecc.steps;
  3543. uint8_t *ecc_calc = chip->ecc.calc_buf;
  3544. const uint8_t *p = buf;
  3545. ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
  3546. if (ret)
  3547. return ret;
  3548. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  3549. chip->ecc.hwctl(chip, NAND_ECC_WRITE);
  3550. ret = nand_write_data_op(chip, p, eccsize, false);
  3551. if (ret)
  3552. return ret;
  3553. chip->ecc.calculate(chip, p, &ecc_calc[i]);
  3554. }
  3555. ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
  3556. chip->ecc.total);
  3557. if (ret)
  3558. return ret;
  3559. ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
  3560. if (ret)
  3561. return ret;
  3562. return nand_prog_page_end_op(chip);
  3563. }
/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
  3573. static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
  3574. uint32_t data_len, const uint8_t *buf,
  3575. int oob_required, int page)
  3576. {
  3577. struct mtd_info *mtd = nand_to_mtd(chip);
  3578. uint8_t *oob_buf = chip->oob_poi;
  3579. uint8_t *ecc_calc = chip->ecc.calc_buf;
  3580. int ecc_size = chip->ecc.size;
  3581. int ecc_bytes = chip->ecc.bytes;
  3582. int ecc_steps = chip->ecc.steps;
  3583. uint32_t start_step = offset / ecc_size;
  3584. uint32_t end_step = (offset + data_len - 1) / ecc_size;
  3585. int oob_bytes = mtd->oobsize / ecc_steps;
  3586. int step, ret;
  3587. ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
  3588. if (ret)
  3589. return ret;
  3590. for (step = 0; step < ecc_steps; step++) {
  3591. /* configure controller for WRITE access */
  3592. chip->ecc.hwctl(chip, NAND_ECC_WRITE);
  3593. /* write data (untouched subpages already masked by 0xFF) */
  3594. ret = nand_write_data_op(chip, buf, ecc_size, false);
  3595. if (ret)
  3596. return ret;
  3597. /* mask ECC of un-touched subpages by padding 0xFF */
  3598. if ((step < start_step) || (step > end_step))
  3599. memset(ecc_calc, 0xff, ecc_bytes);
  3600. else
  3601. chip->ecc.calculate(chip, buf, ecc_calc);
  3602. /* mask OOB of un-touched subpages by padding 0xFF */
  3603. /* if oob_required, preserve OOB metadata of written subpage */
  3604. if (!oob_required || (step < start_step) || (step > end_step))
  3605. memset(oob_buf, 0xff, oob_bytes);
  3606. buf += ecc_size;
  3607. ecc_calc += ecc_bytes;
  3608. oob_buf += oob_bytes;
  3609. }
  3610. /* copy calculated ECC for whole page to chip->buffer->oob */
  3611. /* this include masked-value(0xFF) for unwritten subpages */
  3612. ecc_calc = chip->ecc.calc_buf;
  3613. ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
  3614. chip->ecc.total);
  3615. if (ret)
  3616. return ret;
  3617. /* write OOB buffer to NAND device */
  3618. ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
  3619. if (ret)
  3620. return ret;
  3621. return nand_prog_page_end_op(chip);
  3622. }
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
  3633. static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
  3634. int oob_required, int page)
  3635. {
  3636. struct mtd_info *mtd = nand_to_mtd(chip);
  3637. int i, eccsize = chip->ecc.size;
  3638. int eccbytes = chip->ecc.bytes;
  3639. int eccsteps = chip->ecc.steps;
  3640. const uint8_t *p = buf;
  3641. uint8_t *oob = chip->oob_poi;
  3642. int ret;
  3643. ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
  3644. if (ret)
  3645. return ret;
  3646. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  3647. chip->ecc.hwctl(chip, NAND_ECC_WRITE);
  3648. ret = nand_write_data_op(chip, p, eccsize, false);
  3649. if (ret)
  3650. return ret;
  3651. if (chip->ecc.prepad) {
  3652. ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
  3653. false);
  3654. if (ret)
  3655. return ret;
  3656. oob += chip->ecc.prepad;
  3657. }
  3658. chip->ecc.calculate(chip, p, oob);
  3659. ret = nand_write_data_op(chip, oob, eccbytes, false);
  3660. if (ret)
  3661. return ret;
  3662. oob += eccbytes;
  3663. if (chip->ecc.postpad) {
  3664. ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
  3665. false);
  3666. if (ret)
  3667. return ret;
  3668. oob += chip->ecc.postpad;
  3669. }
  3670. }
  3671. /* Calculate remaining oob bytes */
  3672. i = mtd->oobsize - (oob - chip->oob_poi);
  3673. if (i) {
  3674. ret = nand_write_data_op(chip, oob, i, false);
  3675. if (ret)
  3676. return ret;
  3677. }
  3678. return nand_prog_page_end_op(chip);
  3679. }
/**
 * nand_write_page - write one page
 * @mtd: MTD device structure
 * @chip: NAND chip descriptor
 * @offset: address offset within the page
 * @data_len: length of actual data to be written
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @raw: use _raw version of write_page
 */
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                           uint32_t offset, int data_len, const uint8_t *buf,
                           int oob_required, int page, int raw)
{
        int status, subpage;

        if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
            chip->ecc.write_subpage)
                subpage = offset || (data_len < mtd->writesize);
        else
                subpage = 0;

        if (unlikely(raw))
                status = chip->ecc.write_page_raw(chip, buf, oob_required,
                                                  page);
        else if (subpage)
                status = chip->ecc.write_subpage(chip, offset, data_len, buf,
                                                 oob_required, page);
        else
                status = chip->ecc.write_page(chip, buf, oob_required, page);

        if (status < 0)
                return status;

        return 0;
}
/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @mtd: MTD device structure
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 */
  3720. static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
  3721. struct mtd_oob_ops *ops)
  3722. {
  3723. struct nand_chip *chip = mtd_to_nand(mtd);
  3724. int ret;
  3725. /*
  3726. * Initialise to all 0xFF, to avoid the possibility of left over OOB
  3727. * data from a previous OOB read.
  3728. */
  3729. memset(chip->oob_poi, 0xff, mtd->oobsize);
  3730. switch (ops->mode) {
  3731. case MTD_OPS_PLACE_OOB:
  3732. case MTD_OPS_RAW:
  3733. memcpy(chip->oob_poi + ops->ooboffs, oob, len);
  3734. return oob + len;
  3735. case MTD_OPS_AUTO_OOB:
  3736. ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
  3737. ops->ooboffs, len);
  3738. BUG_ON(ret);
  3739. return oob + len;
  3740. default:
  3741. BUG();
  3742. }
  3743. return NULL;
  3744. }
#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 */
  3754. static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
  3755. struct mtd_oob_ops *ops)
  3756. {
  3757. int chipnr, realpage, page, column;
  3758. struct nand_chip *chip = mtd_to_nand(mtd);
  3759. uint32_t writelen = ops->len;
  3760. uint32_t oobwritelen = ops->ooblen;
  3761. uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
  3762. uint8_t *oob = ops->oobbuf;
  3763. uint8_t *buf = ops->datbuf;
  3764. int ret;
  3765. int oob_required = oob ? 1 : 0;
  3766. ops->retlen = 0;
  3767. if (!writelen)
  3768. return 0;
  3769. /* Reject writes, which are not page aligned */
  3770. if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
  3771. pr_notice("%s: attempt to write non page aligned data\n",
  3772. __func__);
  3773. return -EINVAL;
  3774. }
  3775. column = to & (mtd->writesize - 1);
  3776. chipnr = (int)(to >> chip->chip_shift);
  3777. chip->select_chip(chip, chipnr);
  3778. /* Check, if it is write protected */
  3779. if (nand_check_wp(mtd)) {
  3780. ret = -EIO;
  3781. goto err_out;
  3782. }
  3783. realpage = (int)(to >> chip->page_shift);
  3784. page = realpage & chip->pagemask;
  3785. /* Invalidate the page cache, when we write to the cached page */
  3786. if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
  3787. ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
  3788. chip->pagebuf = -1;
  3789. /* Don't allow multipage oob writes with offset */
  3790. if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
  3791. ret = -EINVAL;
  3792. goto err_out;
  3793. }
  3794. while (1) {
  3795. int bytes = mtd->writesize;
  3796. uint8_t *wbuf = buf;
  3797. int use_bufpoi;
  3798. int part_pagewr = (column || writelen < mtd->writesize);
  3799. if (part_pagewr)
  3800. use_bufpoi = 1;
  3801. else if (chip->options & NAND_USE_BOUNCE_BUFFER)
  3802. use_bufpoi = !virt_addr_valid(buf) ||
  3803. !IS_ALIGNED((unsigned long)buf,
  3804. chip->buf_align);
  3805. else
  3806. use_bufpoi = 0;
  3807. /* Partial page write?, or need to use bounce buffer */
  3808. if (use_bufpoi) {
  3809. pr_debug("%s: using write bounce buffer for buf@%p\n",
  3810. __func__, buf);
  3811. if (part_pagewr)
  3812. bytes = min_t(int, bytes - column, writelen);
  3813. chip->pagebuf = -1;
  3814. memset(chip->data_buf, 0xff, mtd->writesize);
  3815. memcpy(&chip->data_buf[column], buf, bytes);
  3816. wbuf = chip->data_buf;
  3817. }
  3818. if (unlikely(oob)) {
  3819. size_t len = min(oobwritelen, oobmaxlen);
  3820. oob = nand_fill_oob(mtd, oob, len, ops);
  3821. oobwritelen -= len;
  3822. } else {
  3823. /* We still need to erase leftover OOB data */
  3824. memset(chip->oob_poi, 0xff, mtd->oobsize);
  3825. }
  3826. ret = nand_write_page(mtd, chip, column, bytes, wbuf,
  3827. oob_required, page,
  3828. (ops->mode == MTD_OPS_RAW));
  3829. if (ret)
  3830. break;
  3831. writelen -= bytes;
  3832. if (!writelen)
  3833. break;
  3834. column = 0;
  3835. buf += bytes;
  3836. realpage++;
  3837. page = realpage & chip->pagemask;
  3838. /* Check, if we cross a chip boundary */
  3839. if (!page) {
  3840. chipnr++;
  3841. chip->select_chip(chip, -1);
  3842. chip->select_chip(chip, chipnr);
  3843. }
  3844. }
  3845. ops->retlen = ops->len - writelen;
  3846. if (unlikely(oob))
  3847. ops->oobretlen = ops->ooblen;
  3848. err_out:
  3849. chip->select_chip(chip, -1);
  3850. return ret;
  3851. }
/**
 * panic_nand_write - [MTD Interface] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write with ECC. Used when performing writes in interrupt context, this
 * may for example be called by mtdoops when writing an oops while in panic.
 */
  3863. static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
  3864. size_t *retlen, const uint8_t *buf)
  3865. {
  3866. struct nand_chip *chip = mtd_to_nand(mtd);
  3867. int chipnr = (int)(to >> chip->chip_shift);
  3868. struct mtd_oob_ops ops;
  3869. int ret;
  3870. /* Grab the device */
  3871. panic_nand_get_device(chip, mtd, FL_WRITING);
  3872. chip->select_chip(chip, chipnr);
  3873. /* Wait for the device to get ready */
  3874. panic_nand_wait(chip, 400);
  3875. memset(&ops, 0, sizeof(ops));
  3876. ops.len = len;
  3877. ops.datbuf = (uint8_t *)buf;
  3878. ops.mode = MTD_OPS_PLACE_OOB;
  3879. ret = nand_do_write_ops(mtd, to, &ops);
  3880. *retlen = ops.retlen;
  3881. return ret;
  3882. }
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
  3891. static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
  3892. struct mtd_oob_ops *ops)
  3893. {
  3894. int chipnr, page, status, len;
  3895. struct nand_chip *chip = mtd_to_nand(mtd);
  3896. pr_debug("%s: to = 0x%08x, len = %i\n",
  3897. __func__, (unsigned int)to, (int)ops->ooblen);
  3898. len = mtd_oobavail(mtd, ops);
  3899. /* Do not allow write past end of page */
  3900. if ((ops->ooboffs + ops->ooblen) > len) {
  3901. pr_debug("%s: attempt to write past end of page\n",
  3902. __func__);
  3903. return -EINVAL;
  3904. }
  3905. chipnr = (int)(to >> chip->chip_shift);
  3906. /*
  3907. * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
  3908. * of my DiskOnChip 2000 test units) will clear the whole data page too
  3909. * if we don't do this. I have no clue why, but I seem to have 'fixed'
  3910. * it in the doc2000 driver in August 1999. dwmw2.
  3911. */
  3912. nand_reset(chip, chipnr);
  3913. chip->select_chip(chip, chipnr);
  3914. /* Shift to get page */
  3915. page = (int)(to >> chip->page_shift);
  3916. /* Check, if it is write protected */
  3917. if (nand_check_wp(mtd)) {
  3918. chip->select_chip(chip, -1);
  3919. return -EROFS;
  3920. }
  3921. /* Invalidate the page cache, if we write to the cached page */
  3922. if (page == chip->pagebuf)
  3923. chip->pagebuf = -1;
  3924. nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
  3925. if (ops->mode == MTD_OPS_RAW)
  3926. status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
  3927. else
  3928. status = chip->ecc.write_oob(chip, page & chip->pagemask);
  3929. chip->select_chip(chip, -1);
  3930. if (status)
  3931. return status;
  3932. ops->oobretlen = ops->ooblen;
  3933. return 0;
  3934. }
/**
 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 */
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
                          struct mtd_oob_ops *ops)
{
        int ret = -ENOTSUPP;

        ops->retlen = 0;

        nand_get_device(mtd, FL_WRITING);

        switch (ops->mode) {
        case MTD_OPS_PLACE_OOB:
        case MTD_OPS_AUTO_OOB:
        case MTD_OPS_RAW:
                break;

        default:
                goto out;
        }

        if (!ops->datbuf)
                ret = nand_do_write_oob(mtd, to, ops);
        else
                ret = nand_do_write_ops(mtd, to, ops);

out:
        nand_release_device(mtd);
        return ret;
}
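
/*
 * Mirror of the read path above: the MTD-level entry point is
 * mtd_write_oob(). Writing only the free OOB bytes of one page (sketch only;
 * oobbuf is assumed to be allocated and filled by the caller):
 *
 *	struct mtd_oob_ops ops = { };
 *
 *	ops.mode   = MTD_OPS_AUTO_OOB;
 *	ops.oobbuf = oobbuf;
 *	ops.ooblen = mtd->oobavail;
 *
 *	ret = mtd_write_oob(mtd, page_offs, &ops);
 */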
/**
 * single_erase - [GENERIC] NAND standard block erase command function
 * @chip: NAND chip object
 * @page: the page address of the block which will be erased
 *
 * Standard erase command for NAND chips. Returns NAND status.
 */
static int single_erase(struct nand_chip *chip, int page)
{
        unsigned int eraseblock;

        /* Send commands to erase a block */
        eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);

        return nand_erase_op(chip, eraseblock);
}
  3977. /**
  3978. * nand_erase - [MTD Interface] erase block(s)
  3979. * @mtd: MTD device structure
  3980. * @instr: erase instruction
  3981. *
* Erase one or more blocks.
  3983. */
  3984. static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
  3985. {
  3986. return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
  3987. }
  3988. /**
  3989. * nand_erase_nand - [INTERN] erase block(s)
  3990. * @chip: NAND chip object
  3991. * @instr: erase instruction
  3992. * @allowbbt: allow erasing the bbt area
  3993. *
* Erase one or more blocks.
  3995. */
  3996. int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
  3997. int allowbbt)
  3998. {
  3999. struct mtd_info *mtd = nand_to_mtd(chip);
  4000. int page, status, pages_per_block, ret, chipnr;
  4001. loff_t len;
  4002. pr_debug("%s: start = 0x%012llx, len = %llu\n",
  4003. __func__, (unsigned long long)instr->addr,
  4004. (unsigned long long)instr->len);
  4005. if (check_offs_len(mtd, instr->addr, instr->len))
  4006. return -EINVAL;
  4007. /* Grab the lock and see if the device is available */
  4008. nand_get_device(mtd, FL_ERASING);
  4009. /* Shift to get first page */
  4010. page = (int)(instr->addr >> chip->page_shift);
  4011. chipnr = (int)(instr->addr >> chip->chip_shift);
  4012. /* Calculate pages in each block */
  4013. pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
  4014. /* Select the NAND device */
  4015. chip->select_chip(chip, chipnr);
  4016. /* Check, if it is write protected */
  4017. if (nand_check_wp(mtd)) {
  4018. pr_debug("%s: device is write protected!\n",
  4019. __func__);
  4020. ret = -EIO;
  4021. goto erase_exit;
  4022. }
  4023. /* Loop through the pages */
  4024. len = instr->len;
  4025. while (len) {
  4026. /* Check if we have a bad block, we do not erase bad blocks! */
  4027. if (nand_block_checkbad(mtd, ((loff_t) page) <<
  4028. chip->page_shift, allowbbt)) {
  4029. pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
  4030. __func__, page);
  4031. ret = -EIO;
  4032. goto erase_exit;
  4033. }
  4034. /*
  4035. * Invalidate the page cache, if we erase the block which
  4036. * contains the current cached page.
  4037. */
  4038. if (page <= chip->pagebuf && chip->pagebuf <
  4039. (page + pages_per_block))
  4040. chip->pagebuf = -1;
  4041. if (chip->legacy.erase)
  4042. status = chip->legacy.erase(chip,
  4043. page & chip->pagemask);
  4044. else
  4045. status = single_erase(chip, page & chip->pagemask);
  4046. /* See if block erase succeeded */
  4047. if (status) {
  4048. pr_debug("%s: failed erase, page 0x%08x\n",
  4049. __func__, page);
  4050. ret = -EIO;
  4051. instr->fail_addr =
  4052. ((loff_t)page << chip->page_shift);
  4053. goto erase_exit;
  4054. }
  4055. /* Increment page address and decrement length */
  4056. len -= (1ULL << chip->phys_erase_shift);
  4057. page += pages_per_block;
  4058. /* Check, if we cross a chip boundary */
  4059. if (len && !(page & chip->pagemask)) {
  4060. chipnr++;
  4061. chip->select_chip(chip, -1);
  4062. chip->select_chip(chip, chipnr);
  4063. }
  4064. }
  4065. ret = 0;
  4066. erase_exit:
  4067. /* Deselect and wake up anyone waiting on the device */
  4068. chip->select_chip(chip, -1);
  4069. nand_release_device(mtd);
  4070. /* Return more or less happy */
  4071. return ret;
  4072. }
  4073. /**
  4074. * nand_sync - [MTD Interface] sync
  4075. * @mtd: MTD device structure
  4076. *
  4077. * Sync is actually a wait for chip ready function.
  4078. */
  4079. static void nand_sync(struct mtd_info *mtd)
  4080. {
  4081. pr_debug("%s: called\n", __func__);
  4082. /* Grab the lock and see if the device is available */
  4083. nand_get_device(mtd, FL_SYNCING);
  4084. /* Release it and go back */
  4085. nand_release_device(mtd);
  4086. }
  4087. /**
  4088. * nand_block_isbad - [MTD Interface] Check if block at offset is bad
  4089. * @mtd: MTD device structure
  4090. * @offs: offset relative to mtd start
  4091. */
  4092. static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
  4093. {
  4094. struct nand_chip *chip = mtd_to_nand(mtd);
  4095. int chipnr = (int)(offs >> chip->chip_shift);
  4096. int ret;
  4097. /* Select the NAND device */
  4098. nand_get_device(mtd, FL_READING);
  4099. chip->select_chip(chip, chipnr);
  4100. ret = nand_block_checkbad(mtd, offs, 0);
  4101. chip->select_chip(chip, -1);
  4102. nand_release_device(mtd);
  4103. return ret;
  4104. }
  4105. /**
  4106. * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
  4107. * @mtd: MTD device structure
  4108. * @ofs: offset relative to mtd start
  4109. */
  4110. static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
  4111. {
  4112. int ret;
  4113. ret = nand_block_isbad(mtd, ofs);
  4114. if (ret) {
  4115. /* If it was bad already, return success and do nothing */
  4116. if (ret > 0)
  4117. return 0;
  4118. return ret;
  4119. }
  4120. return nand_block_markbad_lowlevel(mtd, ofs);
  4121. }
  4122. /**
  4123. * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
  4124. * @mtd: MTD device structure
  4125. * @ofs: offset relative to mtd start
  4126. * @len: length of mtd
  4127. */
  4128. static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
  4129. {
  4130. struct nand_chip *chip = mtd_to_nand(mtd);
  4131. u32 part_start_block;
  4132. u32 part_end_block;
  4133. u32 part_start_die;
  4134. u32 part_end_die;
  4135. /*
* max_bb_per_die and blocks_per_die are used to determine
  4137. * the maximum bad block count.
  4138. */
  4139. if (!chip->max_bb_per_die || !chip->blocks_per_die)
  4140. return -ENOTSUPP;
  4141. /* Get the start and end of the partition in erase blocks. */
  4142. part_start_block = mtd_div_by_eb(ofs, mtd);
  4143. part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
  4144. /* Get the start and end LUNs of the partition. */
  4145. part_start_die = part_start_block / chip->blocks_per_die;
  4146. part_end_die = part_end_block / chip->blocks_per_die;
  4147. /*
  4148. * Look up the bad blocks per unit and multiply by the number of units
  4149. * that the partition spans.
  4150. */
  4151. return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
  4152. }
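/*
* Worked example with illustrative (made up) values: for a chip reporting
* max_bb_per_die = 40 and blocks_per_die = 1024, a partition that starts in
* die 0 and ends in die 1 spans two LUNs, so the worst case reported here is
* 40 * (1 - 0 + 1) = 80 bad blocks.
*/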
  4153. /**
  4154. * nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
  4155. * @chip: nand chip info structure
  4156. * @addr: feature address.
  4157. * @subfeature_param: the subfeature parameters, a four bytes array.
  4158. *
  4159. * Should be used by NAND controller drivers that do not support the SET/GET
  4160. * FEATURES operations.
  4161. */
  4162. int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
  4163. u8 *subfeature_param)
  4164. {
  4165. return -ENOTSUPP;
  4166. }
  4167. EXPORT_SYMBOL(nand_get_set_features_notsupp);
  4168. /**
  4169. * nand_suspend - [MTD Interface] Suspend the NAND flash
  4170. * @mtd: MTD device structure
  4171. */
  4172. static int nand_suspend(struct mtd_info *mtd)
  4173. {
  4174. return nand_get_device(mtd, FL_PM_SUSPENDED);
  4175. }
  4176. /**
  4177. * nand_resume - [MTD Interface] Resume the NAND flash
  4178. * @mtd: MTD device structure
  4179. */
  4180. static void nand_resume(struct mtd_info *mtd)
  4181. {
  4182. struct nand_chip *chip = mtd_to_nand(mtd);
  4183. if (chip->state == FL_PM_SUSPENDED)
  4184. nand_release_device(mtd);
  4185. else
  4186. pr_err("%s called for a chip which is not in suspended state\n",
  4187. __func__);
  4188. }
  4189. /**
  4190. * nand_shutdown - [MTD Interface] Finish the current NAND operation and
  4191. * prevent further operations
  4192. * @mtd: MTD device structure
  4193. */
  4194. static void nand_shutdown(struct mtd_info *mtd)
  4195. {
  4196. nand_get_device(mtd, FL_PM_SUSPENDED);
  4197. }
  4198. /* Set default functions */
  4199. static void nand_set_defaults(struct nand_chip *chip)
  4200. {
  4201. unsigned int busw = chip->options & NAND_BUSWIDTH_16;
  4202. /* check for proper chip_delay setup, set 20us if not */
  4203. if (!chip->legacy.chip_delay)
  4204. chip->legacy.chip_delay = 20;
/* check if a user-supplied command function was given */
  4206. if (!chip->legacy.cmdfunc && !chip->exec_op)
  4207. chip->legacy.cmdfunc = nand_command;
/* check if a user-supplied wait function was given */
  4209. if (chip->legacy.waitfunc == NULL)
  4210. chip->legacy.waitfunc = nand_wait;
  4211. if (!chip->select_chip)
  4212. chip->select_chip = nand_select_chip;
  4213. /* If called twice, pointers that depend on busw may need to be reset */
  4214. if (!chip->legacy.read_byte || chip->legacy.read_byte == nand_read_byte)
  4215. chip->legacy.read_byte = busw ? nand_read_byte16 : nand_read_byte;
  4216. if (!chip->legacy.write_buf || chip->legacy.write_buf == nand_write_buf)
  4217. chip->legacy.write_buf = busw ? nand_write_buf16 : nand_write_buf;
  4218. if (!chip->legacy.write_byte || chip->legacy.write_byte == nand_write_byte)
  4219. chip->legacy.write_byte = busw ? nand_write_byte16 : nand_write_byte;
  4220. if (!chip->legacy.read_buf || chip->legacy.read_buf == nand_read_buf)
  4221. chip->legacy.read_buf = busw ? nand_read_buf16 : nand_read_buf;
  4222. if (!chip->controller) {
  4223. chip->controller = &chip->dummy_controller;
  4224. nand_controller_init(chip->controller);
  4225. }
  4226. if (!chip->buf_align)
  4227. chip->buf_align = 1;
  4228. }
  4229. /* Sanitize ONFI strings so we can safely print them */
  4230. static void sanitize_string(uint8_t *s, size_t len)
  4231. {
  4232. ssize_t i;
  4233. /* Null terminate */
  4234. s[len - 1] = 0;
  4235. /* Remove non printable chars */
  4236. for (i = 0; i < len - 1; i++) {
  4237. if (s[i] < ' ' || s[i] > 127)
  4238. s[i] = '?';
  4239. }
  4240. /* Remove trailing spaces */
  4241. strim(s);
  4242. }
  4243. static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
  4244. {
  4245. int i;
  4246. while (len--) {
  4247. crc ^= *p++ << 8;
  4248. for (i = 0; i < 8; i++)
  4249. crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
  4250. }
  4251. return crc;
  4252. }
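/*
* This is the CRC16 defined by the ONFI specification: polynomial 0x8005,
* processed MSB first, no final XOR, seeded with ONFI_CRC_BASE. Callers below
* run it over the first 254 bytes of a parameter page copy and compare the
* result against the little-endian CRC stored in bytes 254-255 of that copy.
*/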
  4253. /* Parse the Extended Parameter Page. */
  4254. static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
  4255. struct nand_onfi_params *p)
  4256. {
  4257. struct onfi_ext_param_page *ep;
  4258. struct onfi_ext_section *s;
  4259. struct onfi_ext_ecc_info *ecc;
  4260. uint8_t *cursor;
  4261. int ret;
  4262. int len;
  4263. int i;
  4264. len = le16_to_cpu(p->ext_param_page_length) * 16;
  4265. ep = kmalloc(len, GFP_KERNEL);
  4266. if (!ep)
  4267. return -ENOMEM;
  4268. /* Send our own NAND_CMD_PARAM. */
  4269. ret = nand_read_param_page_op(chip, 0, NULL, 0);
  4270. if (ret)
  4271. goto ext_out;
  4272. /* Use the Change Read Column command to skip the ONFI param pages. */
  4273. ret = nand_change_read_column_op(chip,
  4274. sizeof(*p) * p->num_of_param_pages,
  4275. ep, len, true);
  4276. if (ret)
  4277. goto ext_out;
  4278. ret = -EINVAL;
  4279. if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
  4280. != le16_to_cpu(ep->crc))) {
pr_debug("CRC check of the extended parameter page failed.\n");
  4282. goto ext_out;
  4283. }
  4284. /*
  4285. * Check the signature.
* We do not strictly follow the ONFI spec here; this may change in the future.
  4287. */
  4288. if (strncmp(ep->sig, "EPPS", 4)) {
  4289. pr_debug("The signature is invalid.\n");
  4290. goto ext_out;
  4291. }
  4292. /* find the ECC section. */
  4293. cursor = (uint8_t *)(ep + 1);
  4294. for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
  4295. s = ep->sections + i;
  4296. if (s->type == ONFI_SECTION_TYPE_2)
  4297. break;
  4298. cursor += s->length * 16;
  4299. }
  4300. if (i == ONFI_EXT_SECTION_MAX) {
pr_debug("Could not find the ECC section.\n");
  4302. goto ext_out;
  4303. }
  4304. /* get the info we want. */
  4305. ecc = (struct onfi_ext_ecc_info *)cursor;
  4306. if (!ecc->codeword_size) {
  4307. pr_debug("Invalid codeword size\n");
  4308. goto ext_out;
  4309. }
  4310. chip->ecc_strength_ds = ecc->ecc_bits;
  4311. chip->ecc_step_ds = 1 << ecc->codeword_size;
  4312. ret = 0;
  4313. ext_out:
  4314. kfree(ep);
  4315. return ret;
  4316. }
  4317. /*
  4318. * Recover data with bit-wise majority
  4319. */
  4320. static void nand_bit_wise_majority(const void **srcbufs,
  4321. unsigned int nsrcbufs,
  4322. void *dstbuf,
  4323. unsigned int bufsize)
  4324. {
  4325. int i, j, k;
  4326. for (i = 0; i < bufsize; i++) {
  4327. u8 val = 0;
  4328. for (j = 0; j < 8; j++) {
  4329. unsigned int cnt = 0;
  4330. for (k = 0; k < nsrcbufs; k++) {
  4331. const u8 *srcbuf = srcbufs[k];
  4332. if (srcbuf[i] & BIT(j))
  4333. cnt++;
  4334. }
  4335. if (cnt > nsrcbufs / 2)
  4336. val |= BIT(j);
  4337. }
  4338. ((u8 *)dstbuf)[i] = val;
  4339. }
  4340. }
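/*
* Illustrative example: if the first byte of the three source buffers reads
* 0xF0, 0xF1 and 0xB0, a bit is kept only when it is set in more than
* nsrcbufs / 2 = 1 copies, so the recovered byte is 0xF0 (bit 0 is seen once,
* bit 6 twice, bits 4, 5 and 7 three times).
*/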
  4341. /*
  4342. * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  4343. */
  4344. static int nand_flash_detect_onfi(struct nand_chip *chip)
  4345. {
  4346. struct mtd_info *mtd = nand_to_mtd(chip);
  4347. struct nand_onfi_params *p;
  4348. struct onfi_params *onfi;
  4349. int onfi_version = 0;
  4350. char id[4];
  4351. int i, ret, val;
  4352. /* Try ONFI for unknown chip or LP */
  4353. ret = nand_readid_op(chip, 0x20, id, sizeof(id));
  4354. if (ret || strncmp(id, "ONFI", 4))
  4355. return 0;
  4356. /* ONFI chip: allocate a buffer to hold its parameter page */
  4357. p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
  4358. if (!p)
  4359. return -ENOMEM;
  4360. ret = nand_read_param_page_op(chip, 0, NULL, 0);
  4361. if (ret) {
  4362. ret = 0;
  4363. goto free_onfi_param_page;
  4364. }
  4365. for (i = 0; i < 3; i++) {
  4366. ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
  4367. if (ret) {
  4368. ret = 0;
  4369. goto free_onfi_param_page;
  4370. }
if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
le16_to_cpu(p[i].crc)) {
  4373. if (i)
  4374. memcpy(p, &p[i], sizeof(*p));
  4375. break;
  4376. }
  4377. }
  4378. if (i == 3) {
  4379. const void *srcbufs[3] = {p, p + 1, p + 2};
  4380. pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
  4381. nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
  4382. sizeof(*p));
  4383. if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
  4384. le16_to_cpu(p->crc)) {
  4385. pr_err("ONFI parameter recovery failed, aborting\n");
  4386. goto free_onfi_param_page;
  4387. }
  4388. }
  4389. if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
  4390. chip->manufacturer.desc->ops->fixup_onfi_param_page)
  4391. chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
  4392. /* Check version */
  4393. val = le16_to_cpu(p->revision);
  4394. if (val & ONFI_VERSION_2_3)
  4395. onfi_version = 23;
  4396. else if (val & ONFI_VERSION_2_2)
  4397. onfi_version = 22;
  4398. else if (val & ONFI_VERSION_2_1)
  4399. onfi_version = 21;
  4400. else if (val & ONFI_VERSION_2_0)
  4401. onfi_version = 20;
  4402. else if (val & ONFI_VERSION_1_0)
  4403. onfi_version = 10;
  4404. if (!onfi_version) {
  4405. pr_info("unsupported ONFI version: %d\n", val);
  4406. goto free_onfi_param_page;
  4407. }
  4408. sanitize_string(p->manufacturer, sizeof(p->manufacturer));
  4409. sanitize_string(p->model, sizeof(p->model));
  4410. chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
  4411. if (!chip->parameters.model) {
  4412. ret = -ENOMEM;
  4413. goto free_onfi_param_page;
  4414. }
  4415. mtd->writesize = le32_to_cpu(p->byte_per_page);
  4416. /*
  4417. * pages_per_block and blocks_per_lun may not be a power-of-2 size
  4418. * (don't ask me who thought of this...). MTD assumes that these
  4419. * dimensions will be power-of-2, so just truncate the remaining area.
  4420. */
  4421. mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
  4422. mtd->erasesize *= mtd->writesize;
  4423. mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
  4424. /* See erasesize comment */
  4425. chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
  4426. chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
  4427. chip->bits_per_cell = p->bits_per_cell;
  4428. chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
  4429. chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
  4430. if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
  4431. chip->options |= NAND_BUSWIDTH_16;
  4432. if (p->ecc_bits != 0xff) {
  4433. chip->ecc_strength_ds = p->ecc_bits;
  4434. chip->ecc_step_ds = 512;
  4435. } else if (onfi_version >= 21 &&
  4436. (le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
  4437. /*
  4438. * The nand_flash_detect_ext_param_page() uses the
* Change Read Column command, which may not be supported
* by chip->legacy.cmdfunc. So try to update
* chip->legacy.cmdfunc now. We do not replace a user-supplied
* command function.
  4443. */
  4444. if (mtd->writesize > 512 &&
  4445. chip->legacy.cmdfunc == nand_command)
  4446. chip->legacy.cmdfunc = nand_command_lp;
  4447. /* The Extended Parameter Page is supported since ONFI 2.1. */
  4448. if (nand_flash_detect_ext_param_page(chip, p))
  4449. pr_warn("Failed to detect ONFI extended param page\n");
  4450. } else {
  4451. pr_warn("Could not retrieve ONFI ECC requirements\n");
  4452. }
  4453. /* Save some parameters from the parameter page for future use */
  4454. if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
  4455. chip->parameters.supports_set_get_features = true;
  4456. bitmap_set(chip->parameters.get_feature_list,
  4457. ONFI_FEATURE_ADDR_TIMING_MODE, 1);
  4458. bitmap_set(chip->parameters.set_feature_list,
  4459. ONFI_FEATURE_ADDR_TIMING_MODE, 1);
  4460. }
  4461. onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
  4462. if (!onfi) {
  4463. ret = -ENOMEM;
  4464. goto free_model;
  4465. }
  4466. onfi->version = onfi_version;
  4467. onfi->tPROG = le16_to_cpu(p->t_prog);
  4468. onfi->tBERS = le16_to_cpu(p->t_bers);
  4469. onfi->tR = le16_to_cpu(p->t_r);
  4470. onfi->tCCS = le16_to_cpu(p->t_ccs);
  4471. onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
  4472. onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
  4473. memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
  4474. chip->parameters.onfi = onfi;
  4475. /* Identification done, free the full ONFI parameter page and exit */
  4476. kfree(p);
  4477. return 1;
  4478. free_model:
  4479. kfree(chip->parameters.model);
  4480. free_onfi_param_page:
  4481. kfree(p);
  4482. return ret;
  4483. }
  4484. /*
  4485. * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
  4486. */
  4487. static int nand_flash_detect_jedec(struct nand_chip *chip)
  4488. {
  4489. struct mtd_info *mtd = nand_to_mtd(chip);
  4490. struct nand_jedec_params *p;
  4491. struct jedec_ecc_info *ecc;
  4492. int jedec_version = 0;
  4493. char id[5];
  4494. int i, val, ret;
  4495. /* Try JEDEC for unknown chip or LP */
  4496. ret = nand_readid_op(chip, 0x40, id, sizeof(id));
  4497. if (ret || strncmp(id, "JEDEC", sizeof(id)))
  4498. return 0;
  4499. /* JEDEC chip: allocate a buffer to hold its parameter page */
  4500. p = kzalloc(sizeof(*p), GFP_KERNEL);
  4501. if (!p)
  4502. return -ENOMEM;
  4503. ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
  4504. if (ret) {
  4505. ret = 0;
  4506. goto free_jedec_param_page;
  4507. }
  4508. for (i = 0; i < 3; i++) {
  4509. ret = nand_read_data_op(chip, p, sizeof(*p), true);
  4510. if (ret) {
  4511. ret = 0;
  4512. goto free_jedec_param_page;
  4513. }
  4514. if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
  4515. le16_to_cpu(p->crc))
  4516. break;
  4517. }
  4518. if (i == 3) {
  4519. pr_err("Could not find valid JEDEC parameter page; aborting\n");
  4520. goto free_jedec_param_page;
  4521. }
  4522. /* Check version */
  4523. val = le16_to_cpu(p->revision);
  4524. if (val & (1 << 2))
  4525. jedec_version = 10;
  4526. else if (val & (1 << 1))
  4527. jedec_version = 1; /* vendor specific version */
  4528. if (!jedec_version) {
  4529. pr_info("unsupported JEDEC version: %d\n", val);
  4530. goto free_jedec_param_page;
  4531. }
  4532. sanitize_string(p->manufacturer, sizeof(p->manufacturer));
  4533. sanitize_string(p->model, sizeof(p->model));
  4534. chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
  4535. if (!chip->parameters.model) {
  4536. ret = -ENOMEM;
  4537. goto free_jedec_param_page;
  4538. }
  4539. mtd->writesize = le32_to_cpu(p->byte_per_page);
/* Please refer to the comment in nand_flash_detect_onfi(). */
  4541. mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
  4542. mtd->erasesize *= mtd->writesize;
  4543. mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
/* Please refer to the comment in nand_flash_detect_onfi(). */
  4545. chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
  4546. chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
  4547. chip->bits_per_cell = p->bits_per_cell;
  4548. if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
  4549. chip->options |= NAND_BUSWIDTH_16;
  4550. /* ECC info */
  4551. ecc = &p->ecc_info[0];
  4552. if (ecc->codeword_size >= 9) {
  4553. chip->ecc_strength_ds = ecc->ecc_bits;
  4554. chip->ecc_step_ds = 1 << ecc->codeword_size;
  4555. } else {
  4556. pr_warn("Invalid codeword size\n");
}
ret = 1;
free_jedec_param_page:
  4559. kfree(p);
  4560. return ret;
  4561. }
  4562. /*
  4563. * nand_id_has_period - Check if an ID string has a given wraparound period
  4564. * @id_data: the ID string
  4565. * @arrlen: the length of the @id_data array
* @period: the period of repetition
*
* Check if an ID string is repeated within a given sequence of bytes at a
* specific repetition interval (the period); e.g., {0x20,0x01,0x7F,0x20} has a
* period of 3. This is a helper function for nand_id_len(). Returns non-zero
  4571. * if the repetition has a period of @period; otherwise, returns zero.
  4572. */
  4573. static int nand_id_has_period(u8 *id_data, int arrlen, int period)
  4574. {
  4575. int i, j;
  4576. for (i = 0; i < period; i++)
  4577. for (j = i + period; j < arrlen; j += period)
  4578. if (id_data[i] != id_data[j])
  4579. return 0;
  4580. return 1;
  4581. }
  4582. /*
  4583. * nand_id_len - Get the length of an ID string returned by CMD_READID
  4584. * @id_data: the ID string
  4585. * @arrlen: the length of the @id_data array
  4586. * Returns the length of the ID string, according to known wraparound/trailing
  4587. * zero patterns. If no pattern exists, returns the length of the array.
  4588. */
  4589. static int nand_id_len(u8 *id_data, int arrlen)
  4590. {
  4591. int last_nonzero, period;
  4592. /* Find last non-zero byte */
  4593. for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
  4594. if (id_data[last_nonzero])
  4595. break;
  4596. /* All zeros */
  4597. if (last_nonzero < 0)
  4598. return 0;
  4599. /* Calculate wraparound period */
  4600. for (period = 1; period < arrlen; period++)
  4601. if (nand_id_has_period(id_data, arrlen, period))
  4602. break;
  4603. /* There's a repeated pattern */
  4604. if (period < arrlen)
  4605. return period;
  4606. /* There are trailing zeros */
  4607. if (last_nonzero < arrlen - 1)
  4608. return last_nonzero + 1;
  4609. /* No pattern detected */
  4610. return arrlen;
  4611. }
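/*
* For illustration, assuming an 8-byte ID buffer: {0xec, 0xd3, 0x51, 0x95,
* 0x58, 0xec, 0xd3, 0x51} wraps around with a period of 5, so the reported
* length is 5; {0xec, 0xd3, 0x51, 0x95, 0x58, 0x00, 0x00, 0x00} has no
* repeating pattern but three trailing zeros, so the length is also 5.
*/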
  4612. /* Extract the bits of per cell from the 3rd byte of the extended ID */
  4613. static int nand_get_bits_per_cell(u8 cellinfo)
  4614. {
  4615. int bits;
  4616. bits = cellinfo & NAND_CI_CELLTYPE_MSK;
  4617. bits >>= NAND_CI_CELLTYPE_SHIFT;
  4618. return bits + 1;
  4619. }
  4620. /*
* Many new NAND chips share similar device ID codes, which represent the size of the
  4622. * chip. The rest of the parameters must be decoded according to generic or
  4623. * manufacturer-specific "extended ID" decoding patterns.
  4624. */
  4625. void nand_decode_ext_id(struct nand_chip *chip)
  4626. {
  4627. struct mtd_info *mtd = nand_to_mtd(chip);
  4628. int extid;
  4629. u8 *id_data = chip->id.data;
  4630. /* The 3rd id byte holds MLC / multichip data */
  4631. chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
  4632. /* The 4th id byte is the important one */
  4633. extid = id_data[3];
  4634. /* Calc pagesize */
  4635. mtd->writesize = 1024 << (extid & 0x03);
  4636. extid >>= 2;
  4637. /* Calc oobsize */
  4638. mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
  4639. extid >>= 2;
  4640. /* Calc blocksize. Blocksize is multiples of 64KiB */
  4641. mtd->erasesize = (64 * 1024) << (extid & 0x03);
  4642. extid >>= 2;
  4643. /* Get buswidth information */
  4644. if (extid & 0x1)
  4645. chip->options |= NAND_BUSWIDTH_16;
  4646. }
  4647. EXPORT_SYMBOL_GPL(nand_decode_ext_id);
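/*
* Decoding example for the generic (non vendor-specific) scheme above: a 4th
* ID byte of 0x15 yields writesize = 1024 << 1 = 2048 bytes, oobsize =
* (8 << 1) * (2048 >> 9) = 64 bytes, erasesize = 64 KiB << 1 = 128 KiB and an
* 8-bit bus (the buswidth bit is clear).
*/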
  4648. /*
  4649. * Old devices have chip data hardcoded in the device ID table. nand_decode_id
  4650. * decodes a matching ID table entry and assigns the MTD size parameters for
  4651. * the chip.
  4652. */
  4653. static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
  4654. {
  4655. struct mtd_info *mtd = nand_to_mtd(chip);
  4656. mtd->erasesize = type->erasesize;
  4657. mtd->writesize = type->pagesize;
  4658. mtd->oobsize = mtd->writesize / 32;
  4659. /* All legacy ID NAND are small-page, SLC */
  4660. chip->bits_per_cell = 1;
  4661. }
  4662. /*
  4663. * Set the bad block marker/indicator (BBM/BBI) patterns according to some
  4664. * heuristic patterns using various detected parameters (e.g., manufacturer,
  4665. * page size, cell-type information).
  4666. */
  4667. static void nand_decode_bbm_options(struct nand_chip *chip)
  4668. {
  4669. struct mtd_info *mtd = nand_to_mtd(chip);
  4670. /* Set the bad block position */
  4671. if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
  4672. chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
  4673. else
  4674. chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
  4675. }
  4676. static inline bool is_full_id_nand(struct nand_flash_dev *type)
  4677. {
  4678. return type->id_len;
  4679. }
  4680. static bool find_full_id_nand(struct nand_chip *chip,
  4681. struct nand_flash_dev *type)
  4682. {
  4683. struct mtd_info *mtd = nand_to_mtd(chip);
  4684. u8 *id_data = chip->id.data;
  4685. if (!strncmp(type->id, id_data, type->id_len)) {
  4686. mtd->writesize = type->pagesize;
  4687. mtd->erasesize = type->erasesize;
  4688. mtd->oobsize = type->oobsize;
  4689. chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
  4690. chip->chipsize = (uint64_t)type->chipsize << 20;
  4691. chip->options |= type->options;
  4692. chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
  4693. chip->ecc_step_ds = NAND_ECC_STEP(type);
  4694. chip->onfi_timing_mode_default =
  4695. type->onfi_timing_mode_default;
  4696. chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
  4697. if (!chip->parameters.model)
  4698. return false;
  4699. return true;
  4700. }
  4701. return false;
  4702. }
  4703. /*
  4704. * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
  4705. * compliant and does not have a full-id or legacy-id entry in the nand_ids
  4706. * table.
  4707. */
  4708. static void nand_manufacturer_detect(struct nand_chip *chip)
  4709. {
  4710. /*
  4711. * Try manufacturer detection if available and use
  4712. * nand_decode_ext_id() otherwise.
  4713. */
  4714. if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
  4715. chip->manufacturer.desc->ops->detect) {
  4716. /* The 3rd id byte holds MLC / multichip data */
  4717. chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
  4718. chip->manufacturer.desc->ops->detect(chip);
  4719. } else {
  4720. nand_decode_ext_id(chip);
  4721. }
  4722. }
  4723. /*
  4724. * Manufacturer initialization. This function is called for all NANDs including
  4725. * ONFI and JEDEC compliant ones.
  4726. * Manufacturer drivers should put all their specific initialization code in
  4727. * their ->init() hook.
  4728. */
  4729. static int nand_manufacturer_init(struct nand_chip *chip)
  4730. {
  4731. if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
  4732. !chip->manufacturer.desc->ops->init)
  4733. return 0;
  4734. return chip->manufacturer.desc->ops->init(chip);
  4735. }
  4736. /*
  4737. * Manufacturer cleanup. This function is called for all NANDs including
  4738. * ONFI and JEDEC compliant ones.
  4739. * Manufacturer drivers should put all their specific cleanup code in their
  4740. * ->cleanup() hook.
  4741. */
  4742. static void nand_manufacturer_cleanup(struct nand_chip *chip)
  4743. {
  4744. /* Release manufacturer private data */
  4745. if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
  4746. chip->manufacturer.desc->ops->cleanup)
  4747. chip->manufacturer.desc->ops->cleanup(chip);
  4748. }
  4749. /*
  4750. * Get the flash and manufacturer id and lookup if the type is supported.
  4751. */
  4752. static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
  4753. {
  4754. const struct nand_manufacturer *manufacturer;
  4755. struct mtd_info *mtd = nand_to_mtd(chip);
  4756. int busw, ret;
  4757. u8 *id_data = chip->id.data;
  4758. u8 maf_id, dev_id;
  4759. /*
  4760. * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
  4761. * after power-up.
  4762. */
  4763. ret = nand_reset(chip, 0);
  4764. if (ret)
  4765. return ret;
  4766. /* Select the device */
  4767. chip->select_chip(chip, 0);
  4768. /* Send the command for reading device ID */
  4769. ret = nand_readid_op(chip, 0, id_data, 2);
  4770. if (ret)
  4771. return ret;
  4772. /* Read manufacturer and device IDs */
  4773. maf_id = id_data[0];
  4774. dev_id = id_data[1];
  4775. /*
* Try again to make sure, as on some systems bus-hold or other
* interface concerns can cause random data that looks like a
* possibly credible NAND flash to appear. If the two results do
  4779. * not match, ignore the device completely.
  4780. */
  4781. /* Read entire ID string */
  4782. ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
  4783. if (ret)
  4784. return ret;
  4785. if (id_data[0] != maf_id || id_data[1] != dev_id) {
  4786. pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
  4787. maf_id, dev_id, id_data[0], id_data[1]);
  4788. return -ENODEV;
  4789. }
  4790. chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
  4791. /* Try to identify manufacturer */
  4792. manufacturer = nand_get_manufacturer(maf_id);
  4793. chip->manufacturer.desc = manufacturer;
  4794. if (!type)
  4795. type = nand_flash_ids;
  4796. /*
  4797. * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
  4798. * override it.
  4799. * This is required to make sure initial NAND bus width set by the
  4800. * NAND controller driver is coherent with the real NAND bus width
  4801. * (extracted by auto-detection code).
  4802. */
  4803. busw = chip->options & NAND_BUSWIDTH_16;
  4804. /*
  4805. * The flag is only set (never cleared), reset it to its default value
  4806. * before starting auto-detection.
  4807. */
  4808. chip->options &= ~NAND_BUSWIDTH_16;
  4809. for (; type->name != NULL; type++) {
  4810. if (is_full_id_nand(type)) {
  4811. if (find_full_id_nand(chip, type))
  4812. goto ident_done;
  4813. } else if (dev_id == type->dev_id) {
  4814. break;
  4815. }
  4816. }
  4817. if (!type->name || !type->pagesize) {
  4818. /* Check if the chip is ONFI compliant */
  4819. ret = nand_flash_detect_onfi(chip);
  4820. if (ret < 0)
  4821. return ret;
  4822. else if (ret)
  4823. goto ident_done;
  4824. /* Check if the chip is JEDEC compliant */
  4825. ret = nand_flash_detect_jedec(chip);
  4826. if (ret < 0)
  4827. return ret;
  4828. else if (ret)
  4829. goto ident_done;
  4830. }
  4831. if (!type->name)
  4832. return -ENODEV;
  4833. chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
  4834. if (!chip->parameters.model)
  4835. return -ENOMEM;
  4836. chip->chipsize = (uint64_t)type->chipsize << 20;
  4837. if (!type->pagesize)
  4838. nand_manufacturer_detect(chip);
  4839. else
  4840. nand_decode_id(chip, type);
  4841. /* Get chip options */
  4842. chip->options |= type->options;
  4843. ident_done:
  4844. if (!mtd->name)
  4845. mtd->name = chip->parameters.model;
  4846. if (chip->options & NAND_BUSWIDTH_AUTO) {
  4847. WARN_ON(busw & NAND_BUSWIDTH_16);
  4848. nand_set_defaults(chip);
  4849. } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
  4850. /*
* Check if the bus width is correct. Hardware drivers should
* set up the chip correctly!
  4853. */
  4854. pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
  4855. maf_id, dev_id);
  4856. pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
  4857. mtd->name);
  4858. pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
  4859. (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
  4860. ret = -EINVAL;
  4861. goto free_detect_allocation;
  4862. }
  4863. nand_decode_bbm_options(chip);
  4864. /* Calculate the address shift from the page size */
  4865. chip->page_shift = ffs(mtd->writesize) - 1;
  4866. /* Convert chipsize to number of pages per chip -1 */
  4867. chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
  4868. chip->bbt_erase_shift = chip->phys_erase_shift =
  4869. ffs(mtd->erasesize) - 1;
  4870. if (chip->chipsize & 0xffffffff)
  4871. chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
  4872. else {
  4873. chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
  4874. chip->chip_shift += 32 - 1;
  4875. }
  4876. if (chip->chip_shift - chip->page_shift > 16)
  4877. chip->options |= NAND_ROW_ADDR_3;
  4878. chip->badblockbits = 8;
  4879. /* Do not replace user supplied command function! */
  4880. if (mtd->writesize > 512 && chip->legacy.cmdfunc == nand_command)
  4881. chip->legacy.cmdfunc = nand_command_lp;
  4882. pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
  4883. maf_id, dev_id);
  4884. pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
  4885. chip->parameters.model);
  4886. pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
  4887. (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
  4888. mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
  4889. return 0;
  4890. free_detect_allocation:
  4891. kfree(chip->parameters.model);
  4892. return ret;
  4893. }
  4894. static const char * const nand_ecc_modes[] = {
  4895. [NAND_ECC_NONE] = "none",
  4896. [NAND_ECC_SOFT] = "soft",
  4897. [NAND_ECC_HW] = "hw",
  4898. [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
  4899. [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
  4900. [NAND_ECC_ON_DIE] = "on-die",
  4901. };
  4902. static int of_get_nand_ecc_mode(struct device_node *np)
  4903. {
  4904. const char *pm;
  4905. int err, i;
  4906. err = of_property_read_string(np, "nand-ecc-mode", &pm);
  4907. if (err < 0)
  4908. return err;
  4909. for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
  4910. if (!strcasecmp(pm, nand_ecc_modes[i]))
  4911. return i;
  4912. /*
* For backward compatibility we support a few obsolete values that no
* longer have mappings in nand_ecc_modes_t (they were merged
  4915. * with other enums).
  4916. */
  4917. if (!strcasecmp(pm, "soft_bch"))
  4918. return NAND_ECC_SOFT;
  4919. return -ENODEV;
  4920. }
  4921. static const char * const nand_ecc_algos[] = {
  4922. [NAND_ECC_HAMMING] = "hamming",
  4923. [NAND_ECC_BCH] = "bch",
  4924. [NAND_ECC_RS] = "rs",
  4925. };
  4926. static int of_get_nand_ecc_algo(struct device_node *np)
  4927. {
  4928. const char *pm;
  4929. int err, i;
  4930. err = of_property_read_string(np, "nand-ecc-algo", &pm);
  4931. if (!err) {
  4932. for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
  4933. if (!strcasecmp(pm, nand_ecc_algos[i]))
  4934. return i;
  4935. return -ENODEV;
  4936. }
  4937. /*
  4938. * For backward compatibility we also read "nand-ecc-mode" checking
* for some obsolete values that used to specify the ECC algorithm.
  4940. */
  4941. err = of_property_read_string(np, "nand-ecc-mode", &pm);
  4942. if (err < 0)
  4943. return err;
  4944. if (!strcasecmp(pm, "soft"))
  4945. return NAND_ECC_HAMMING;
  4946. else if (!strcasecmp(pm, "soft_bch"))
  4947. return NAND_ECC_BCH;
  4948. return -ENODEV;
  4949. }
  4950. static int of_get_nand_ecc_step_size(struct device_node *np)
  4951. {
  4952. int ret;
  4953. u32 val;
  4954. ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
  4955. return ret ? ret : val;
  4956. }
  4957. static int of_get_nand_ecc_strength(struct device_node *np)
  4958. {
  4959. int ret;
  4960. u32 val;
  4961. ret = of_property_read_u32(np, "nand-ecc-strength", &val);
  4962. return ret ? ret : val;
  4963. }
  4964. static int of_get_nand_bus_width(struct device_node *np)
  4965. {
  4966. u32 val;
  4967. if (of_property_read_u32(np, "nand-bus-width", &val))
  4968. return 8;
  4969. switch (val) {
  4970. case 8:
  4971. case 16:
  4972. return val;
  4973. default:
  4974. return -EIO;
  4975. }
  4976. }
  4977. static bool of_get_nand_on_flash_bbt(struct device_node *np)
  4978. {
  4979. return of_property_read_bool(np, "nand-on-flash-bbt");
  4980. }
  4981. static int nand_dt_init(struct nand_chip *chip)
  4982. {
  4983. struct device_node *dn = nand_get_flash_node(chip);
  4984. int ecc_mode, ecc_algo, ecc_strength, ecc_step;
  4985. if (!dn)
  4986. return 0;
  4987. if (of_get_nand_bus_width(dn) == 16)
  4988. chip->options |= NAND_BUSWIDTH_16;
  4989. if (of_property_read_bool(dn, "nand-is-boot-medium"))
  4990. chip->options |= NAND_IS_BOOT_MEDIUM;
  4991. if (of_get_nand_on_flash_bbt(dn))
  4992. chip->bbt_options |= NAND_BBT_USE_FLASH;
  4993. ecc_mode = of_get_nand_ecc_mode(dn);
  4994. ecc_algo = of_get_nand_ecc_algo(dn);
  4995. ecc_strength = of_get_nand_ecc_strength(dn);
  4996. ecc_step = of_get_nand_ecc_step_size(dn);
  4997. if (ecc_mode >= 0)
  4998. chip->ecc.mode = ecc_mode;
  4999. if (ecc_algo >= 0)
  5000. chip->ecc.algo = ecc_algo;
  5001. if (ecc_strength >= 0)
  5002. chip->ecc.strength = ecc_strength;
  5003. if (ecc_step > 0)
  5004. chip->ecc.size = ecc_step;
  5005. if (of_property_read_bool(dn, "nand-ecc-maximize"))
  5006. chip->ecc.options |= NAND_ECC_MAXIMIZE;
  5007. return 0;
  5008. }
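/*
* Sketch of a device tree node exercising the properties parsed above (an
* assumed example, not tied to any particular board or controller binding):
*
*	nand@0 {
*		nand-bus-width = <8>;
*		nand-on-flash-bbt;
*		nand-ecc-mode = "hw";
*		nand-ecc-algo = "bch";
*		nand-ecc-strength = <8>;
*		nand-ecc-step-size = <512>;
*	};
*
* With this node, nand_dt_init() sets NAND_BBT_USE_FLASH in bbt_options,
* ecc.mode = NAND_ECC_HW, ecc.algo = NAND_ECC_BCH, ecc.strength = 8 and
* ecc.size = 512.
*/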
  5009. /**
  5010. * nand_scan_ident - Scan for the NAND device
  5011. * @chip: NAND chip object
  5012. * @maxchips: number of chips to scan for
  5013. * @table: alternative NAND ID table
  5014. *
  5015. * This is the first phase of the normal nand_scan() function. It reads the
  5016. * flash ID and sets up MTD fields accordingly.
  5017. *
  5018. * This helper used to be called directly from controller drivers that needed
  5019. * to tweak some ECC-related parameters before nand_scan_tail(). This separation
* prevented dynamic allocations during this phase, which was inconvenient and
* has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
  5022. */
  5023. static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
  5024. struct nand_flash_dev *table)
  5025. {
  5026. struct mtd_info *mtd = nand_to_mtd(chip);
  5027. int nand_maf_id, nand_dev_id;
  5028. unsigned int i;
  5029. int ret;
  5030. /* Enforce the right timings for reset/detection */
  5031. onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
  5032. ret = nand_dt_init(chip);
  5033. if (ret)
  5034. return ret;
  5035. if (!mtd->name && mtd->dev.parent)
  5036. mtd->name = dev_name(mtd->dev.parent);
  5037. /*
  5038. * ->legacy.cmdfunc() is legacy and will only be used if ->exec_op() is
  5039. * not populated.
  5040. */
  5041. if (!chip->exec_op) {
  5042. /*
  5043. * Default functions assigned for ->legacy.cmdfunc() and
  5044. * ->select_chip() both expect ->legacy.cmd_ctrl() to be
  5045. * populated.
  5046. */
  5047. if ((!chip->legacy.cmdfunc || !chip->select_chip) &&
  5048. !chip->legacy.cmd_ctrl) {
  5049. pr_err("->legacy.cmd_ctrl() should be provided\n");
  5050. return -EINVAL;
  5051. }
  5052. }
  5053. /* Set the default functions */
  5054. nand_set_defaults(chip);
  5055. /* Read the flash type */
  5056. ret = nand_detect(chip, table);
  5057. if (ret) {
  5058. if (!(chip->options & NAND_SCAN_SILENT_NODEV))
  5059. pr_warn("No NAND device found\n");
  5060. chip->select_chip(chip, -1);
  5061. return ret;
  5062. }
  5063. nand_maf_id = chip->id.data[0];
  5064. nand_dev_id = chip->id.data[1];
  5065. chip->select_chip(chip, -1);
  5066. /* Check for a chip array */
  5067. for (i = 1; i < maxchips; i++) {
  5068. u8 id[2];
  5069. /* See comment in nand_get_flash_type for reset */
  5070. nand_reset(chip, i);
  5071. chip->select_chip(chip, i);
  5072. /* Send the command for reading device ID */
  5073. nand_readid_op(chip, 0, id, sizeof(id));
  5074. /* Read manufacturer and device IDs */
  5075. if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
  5076. chip->select_chip(chip, -1);
  5077. break;
  5078. }
  5079. chip->select_chip(chip, -1);
  5080. }
  5081. if (i > 1)
  5082. pr_info("%d chips detected\n", i);
  5083. /* Store the number of chips and calc total size for mtd */
  5084. chip->numchips = i;
  5085. mtd->size = i * chip->chipsize;
  5086. return 0;
  5087. }
  5088. static void nand_scan_ident_cleanup(struct nand_chip *chip)
  5089. {
  5090. kfree(chip->parameters.model);
  5091. kfree(chip->parameters.onfi);
  5092. }
  5093. static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
  5094. {
  5095. struct nand_chip *chip = mtd_to_nand(mtd);
  5096. struct nand_ecc_ctrl *ecc = &chip->ecc;
  5097. if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
  5098. return -EINVAL;
  5099. switch (ecc->algo) {
  5100. case NAND_ECC_HAMMING:
  5101. ecc->calculate = nand_calculate_ecc;
  5102. ecc->correct = nand_correct_data;
  5103. ecc->read_page = nand_read_page_swecc;
  5104. ecc->read_subpage = nand_read_subpage;
  5105. ecc->write_page = nand_write_page_swecc;
  5106. ecc->read_page_raw = nand_read_page_raw;
  5107. ecc->write_page_raw = nand_write_page_raw;
  5108. ecc->read_oob = nand_read_oob_std;
  5109. ecc->write_oob = nand_write_oob_std;
  5110. if (!ecc->size)
  5111. ecc->size = 256;
  5112. ecc->bytes = 3;
  5113. ecc->strength = 1;
  5114. return 0;
  5115. case NAND_ECC_BCH:
  5116. if (!mtd_nand_has_bch()) {
  5117. WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
  5118. return -EINVAL;
  5119. }
  5120. ecc->calculate = nand_bch_calculate_ecc;
  5121. ecc->correct = nand_bch_correct_data;
  5122. ecc->read_page = nand_read_page_swecc;
  5123. ecc->read_subpage = nand_read_subpage;
  5124. ecc->write_page = nand_write_page_swecc;
  5125. ecc->read_page_raw = nand_read_page_raw;
  5126. ecc->write_page_raw = nand_write_page_raw;
  5127. ecc->read_oob = nand_read_oob_std;
  5128. ecc->write_oob = nand_write_oob_std;
  5129. /*
  5130. * Board driver should supply ecc.size and ecc.strength
  5131. * values to select how many bits are correctable.
  5132. * Otherwise, default to 4 bits for large page devices.
  5133. */
  5134. if (!ecc->size && (mtd->oobsize >= 64)) {
  5135. ecc->size = 512;
  5136. ecc->strength = 4;
  5137. }
  5138. /*
* If no ECC placement scheme was provided, pick the default
* large page one.
  5141. */
  5142. if (!mtd->ooblayout) {
  5143. /* handle large page devices only */
  5144. if (mtd->oobsize < 64) {
  5145. WARN(1, "OOB layout is required when using software BCH on small pages\n");
  5146. return -EINVAL;
  5147. }
  5148. mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
  5149. }
  5150. /*
  5151. * We can only maximize ECC config when the default layout is
  5152. * used, otherwise we don't know how many bytes can really be
  5153. * used.
  5154. */
  5155. if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
  5156. ecc->options & NAND_ECC_MAXIMIZE) {
  5157. int steps, bytes;
  5158. /* Always prefer 1k blocks over 512bytes ones */
  5159. ecc->size = 1024;
  5160. steps = mtd->writesize / ecc->size;
  5161. /* Reserve 2 bytes for the BBM */
  5162. bytes = (mtd->oobsize - 2) / steps;
  5163. ecc->strength = bytes * 8 / fls(8 * ecc->size);
  5164. }
  5165. /* See nand_bch_init() for details. */
  5166. ecc->bytes = 0;
  5167. ecc->priv = nand_bch_init(mtd);
  5168. if (!ecc->priv) {
  5169. WARN(1, "BCH ECC initialization failed!\n");
  5170. return -EINVAL;
  5171. }
  5172. return 0;
  5173. default:
  5174. WARN(1, "Unsupported ECC algorithm!\n");
  5175. return -EINVAL;
  5176. }
  5177. }
  5178. /**
  5179. * nand_check_ecc_caps - check the sanity of preset ECC settings
  5180. * @chip: nand chip info structure
  5181. * @caps: ECC caps info structure
  5182. * @oobavail: OOB size that the ECC engine can use
  5183. *
  5184. * When ECC step size and strength are already set, check if they are supported
  5185. * by the controller and the calculated ECC bytes fit within the chip's OOB.
* On success, the calculated number of ECC bytes is set.
  5187. */
  5188. static int
  5189. nand_check_ecc_caps(struct nand_chip *chip,
  5190. const struct nand_ecc_caps *caps, int oobavail)
  5191. {
  5192. struct mtd_info *mtd = nand_to_mtd(chip);
  5193. const struct nand_ecc_step_info *stepinfo;
  5194. int preset_step = chip->ecc.size;
  5195. int preset_strength = chip->ecc.strength;
  5196. int ecc_bytes, nsteps = mtd->writesize / preset_step;
  5197. int i, j;
  5198. for (i = 0; i < caps->nstepinfos; i++) {
  5199. stepinfo = &caps->stepinfos[i];
  5200. if (stepinfo->stepsize != preset_step)
  5201. continue;
  5202. for (j = 0; j < stepinfo->nstrengths; j++) {
  5203. if (stepinfo->strengths[j] != preset_strength)
  5204. continue;
  5205. ecc_bytes = caps->calc_ecc_bytes(preset_step,
  5206. preset_strength);
  5207. if (WARN_ON_ONCE(ecc_bytes < 0))
  5208. return ecc_bytes;
  5209. if (ecc_bytes * nsteps > oobavail) {
  5210. pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
  5211. preset_step, preset_strength);
  5212. return -ENOSPC;
  5213. }
  5214. chip->ecc.bytes = ecc_bytes;
  5215. return 0;
  5216. }
  5217. }
  5218. pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
  5219. preset_step, preset_strength);
  5220. return -ENOTSUPP;
  5221. }
  5222. /**
  5223. * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
  5224. * @chip: nand chip info structure
  5225. * @caps: ECC engine caps info structure
  5226. * @oobavail: OOB size that the ECC engine can use
  5227. *
  5228. * If a chip's ECC requirement is provided, try to meet it with the least
  5229. * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
  5230. * On success, the chosen ECC settings are set.
  5231. */
  5232. static int
  5233. nand_match_ecc_req(struct nand_chip *chip,
  5234. const struct nand_ecc_caps *caps, int oobavail)
  5235. {
  5236. struct mtd_info *mtd = nand_to_mtd(chip);
  5237. const struct nand_ecc_step_info *stepinfo;
  5238. int req_step = chip->ecc_step_ds;
  5239. int req_strength = chip->ecc_strength_ds;
  5240. int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
  5241. int best_step, best_strength, best_ecc_bytes;
  5242. int best_ecc_bytes_total = INT_MAX;
  5243. int i, j;
  5244. /* No information provided by the NAND chip */
  5245. if (!req_step || !req_strength)
  5246. return -ENOTSUPP;
  5247. /* number of correctable bits the chip requires in a page */
  5248. req_corr = mtd->writesize / req_step * req_strength;
  5249. for (i = 0; i < caps->nstepinfos; i++) {
  5250. stepinfo = &caps->stepinfos[i];
  5251. step_size = stepinfo->stepsize;
  5252. for (j = 0; j < stepinfo->nstrengths; j++) {
  5253. strength = stepinfo->strengths[j];
  5254. /*
  5255. * If both step size and strength are smaller than the
  5256. * chip's requirement, it is not easy to compare the
  5257. * resulted reliability.
  5258. */
  5259. if (step_size < req_step && strength < req_strength)
  5260. continue;
  5261. if (mtd->writesize % step_size)
  5262. continue;
  5263. nsteps = mtd->writesize / step_size;
  5264. ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
  5265. if (WARN_ON_ONCE(ecc_bytes < 0))
  5266. continue;
  5267. ecc_bytes_total = ecc_bytes * nsteps;
  5268. if (ecc_bytes_total > oobavail ||
  5269. strength * nsteps < req_corr)
  5270. continue;
  5271. /*
* We assume the best is to meet the chip's requirement
  5273. * with the least number of ECC bytes.
  5274. */
  5275. if (ecc_bytes_total < best_ecc_bytes_total) {
  5276. best_ecc_bytes_total = ecc_bytes_total;
  5277. best_step = step_size;
  5278. best_strength = strength;
  5279. best_ecc_bytes = ecc_bytes;
  5280. }
  5281. }
  5282. }
  5283. if (best_ecc_bytes_total == INT_MAX)
  5284. return -ENOTSUPP;
  5285. chip->ecc.size = best_step;
  5286. chip->ecc.strength = best_strength;
  5287. chip->ecc.bytes = best_ecc_bytes;
  5288. return 0;
  5289. }
  5290. /**
  5291. * nand_maximize_ecc - choose the max ECC strength available
  5292. * @chip: nand chip info structure
  5293. * @caps: ECC engine caps info structure
  5294. * @oobavail: OOB size that the ECC engine can use
  5295. *
  5296. * Choose the max ECC strength that is supported on the controller, and can fit
  5297. * within the chip's OOB. On success, the chosen ECC settings are set.
  5298. */
  5299. static int
  5300. nand_maximize_ecc(struct nand_chip *chip,
  5301. const struct nand_ecc_caps *caps, int oobavail)
  5302. {
  5303. struct mtd_info *mtd = nand_to_mtd(chip);
  5304. const struct nand_ecc_step_info *stepinfo;
  5305. int step_size, strength, nsteps, ecc_bytes, corr;
  5306. int best_corr = 0;
  5307. int best_step = 0;
  5308. int best_strength, best_ecc_bytes;
  5309. int i, j;
  5310. for (i = 0; i < caps->nstepinfos; i++) {
  5311. stepinfo = &caps->stepinfos[i];
  5312. step_size = stepinfo->stepsize;
  5313. /* If chip->ecc.size is already set, respect it */
  5314. if (chip->ecc.size && step_size != chip->ecc.size)
  5315. continue;
  5316. for (j = 0; j < stepinfo->nstrengths; j++) {
  5317. strength = stepinfo->strengths[j];
  5318. if (mtd->writesize % step_size)
  5319. continue;
  5320. nsteps = mtd->writesize / step_size;
  5321. ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
  5322. if (WARN_ON_ONCE(ecc_bytes < 0))
  5323. continue;
  5324. if (ecc_bytes * nsteps > oobavail)
  5325. continue;
  5326. corr = strength * nsteps;
  5327. /*
  5328. * If the number of correctable bits is the same,
  5329. * bigger step_size has more reliability.
  5330. */
  5331. if (corr > best_corr ||
  5332. (corr == best_corr && step_size > best_step)) {
  5333. best_corr = corr;
  5334. best_step = step_size;
  5335. best_strength = strength;
  5336. best_ecc_bytes = ecc_bytes;
  5337. }
  5338. }
  5339. }
  5340. if (!best_corr)
  5341. return -ENOTSUPP;
  5342. chip->ecc.size = best_step;
  5343. chip->ecc.strength = best_strength;
  5344. chip->ecc.bytes = best_ecc_bytes;
  5345. return 0;
  5346. }
  5347. /**
  5348. * nand_ecc_choose_conf - Set the ECC strength and ECC step size
  5349. * @chip: nand chip info structure
  5350. * @caps: ECC engine caps info structure
  5351. * @oobavail: OOB size that the ECC engine can use
  5352. *
* Choose the ECC configuration according to the following logic
  5354. *
  5355. * 1. If both ECC step size and ECC strength are already set (usually by DT)
  5356. * then check if it is supported by this controller.
  5357. * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
  5358. * 3. Otherwise, try to match the ECC step size and ECC strength closest
* to the chip's requirement. If the available OOB size can't fit the chip's
* requirement then fall back to the maximum ECC step size and ECC strength.
  5361. *
  5362. * On success, the chosen ECC settings are set.
  5363. */
  5364. int nand_ecc_choose_conf(struct nand_chip *chip,
  5365. const struct nand_ecc_caps *caps, int oobavail)
  5366. {
  5367. struct mtd_info *mtd = nand_to_mtd(chip);
  5368. if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
  5369. return -EINVAL;
  5370. if (chip->ecc.size && chip->ecc.strength)
  5371. return nand_check_ecc_caps(chip, caps, oobavail);
  5372. if (chip->ecc.options & NAND_ECC_MAXIMIZE)
  5373. return nand_maximize_ecc(chip, caps, oobavail);
  5374. if (!nand_match_ecc_req(chip, caps, oobavail))
  5375. return 0;
  5376. return nand_maximize_ecc(chip, caps, oobavail);
  5377. }
  5378. EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
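/*
* Minimal usage sketch for a controller driver's ECC setup path. The caps
* table name, the calc_ecc_bytes() callback and the reserved_oob variable are
* made up for illustration; NAND_ECC_CAPS_SINGLE() is the helper declared in
* rawnand.h for controllers with a single ECC step size:
*
*	NAND_ECC_CAPS_SINGLE(example_ecc_caps, example_calc_ecc_bytes,
*			     512, 1, 4, 8);
*
*	ret = nand_ecc_choose_conf(chip, &example_ecc_caps,
*				   mtd->oobsize - reserved_oob);
*	if (ret)
*		return ret;
*
* On success chip->ecc.size, chip->ecc.strength and chip->ecc.bytes hold the
* chosen configuration.
*/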
  5379. /*
* Check if the chip configuration meets the datasheet requirements.
  5381. * If our configuration corrects A bits per B bytes and the minimum
  5382. * required correction level is X bits per Y bytes, then we must ensure
  5383. * both of the following are true:
  5384. *
  5385. * (1) A / B >= X / Y
  5386. * (2) A >= X
  5387. *
  5388. * Requirement (1) ensures we can correct for the required bitflip density.
  5389. * Requirement (2) ensures we can correct even when all bitflips are clumped
  5390. * in the same sector.
  5391. */
  5392. static bool nand_ecc_strength_good(struct mtd_info *mtd)
  5393. {
  5394. struct nand_chip *chip = mtd_to_nand(mtd);
  5395. struct nand_ecc_ctrl *ecc = &chip->ecc;
  5396. int corr, ds_corr;
  5397. if (ecc->size == 0 || chip->ecc_step_ds == 0)
  5398. /* Not enough information */
  5399. return true;
  5400. /*
* We compare the number of correctable bits per page, i.e. the
* correction density.
  5403. */
  5404. corr = (mtd->writesize * ecc->strength) / ecc->size;
  5405. ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
  5406. return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
  5407. }
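/*
* Example: a chip requiring 4 bits per 512 bytes (ecc_strength_ds = 4,
* ecc_step_ds = 512) is satisfied by an 8 bit per 1024 byte configuration on
* a 2048 byte page, since both correct 16 bits per page and 8 >= 4; a 2 bit
* per 512 byte configuration fails both checks.
*/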
  5408. /**
  5409. * nand_scan_tail - Scan for the NAND device
  5410. * @chip: NAND chip object
  5411. *
  5412. * This is the second phase of the normal nand_scan() function. It fills out
  5413. * all the uninitialized function pointers with the defaults and scans for a
  5414. * bad block table if appropriate.
  5415. */
  5416. static int nand_scan_tail(struct nand_chip *chip)
  5417. {
  5418. struct mtd_info *mtd = nand_to_mtd(chip);
  5419. struct nand_ecc_ctrl *ecc = &chip->ecc;
  5420. int ret, i;
  5421. /* New bad blocks should be marked in OOB, flash-based BBT, or both */
  5422. if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
  5423. !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
  5424. return -EINVAL;
  5425. }
  5426. chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
  5427. if (!chip->data_buf)
  5428. return -ENOMEM;
  5429. /*
  5430. * FIXME: some NAND manufacturer drivers expect the first die to be
  5431. * selected when manufacturer->init() is called. They should be fixed
* to explicitly select the relevant die when interacting with the NAND
  5433. * chip.
  5434. */
  5435. chip->select_chip(chip, 0);
  5436. ret = nand_manufacturer_init(chip);
  5437. chip->select_chip(chip, -1);
  5438. if (ret)
  5439. goto err_free_buf;
  5440. /* Set the internal oob buffer location, just after the page data */
  5441. chip->oob_poi = chip->data_buf + mtd->writesize;
  5442. /*
  5443. * If no default placement scheme is given, select an appropriate one.
  5444. */
  5445. if (!mtd->ooblayout &&
  5446. !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
  5447. switch (mtd->oobsize) {
  5448. case 8:
  5449. case 16:
  5450. mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
  5451. break;
  5452. case 64:
  5453. case 128:
  5454. mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
  5455. break;
  5456. default:
  5457. /*
  5458. * Expose the whole OOB area to users if ECC_NONE
* is passed. We could do that for all kinds of
  5460. * ->oobsize, but we must keep the old large/small
  5461. * page with ECC layout when ->oobsize <= 128 for
  5462. * compatibility reasons.
  5463. */
  5464. if (ecc->mode == NAND_ECC_NONE) {
  5465. mtd_set_ooblayout(mtd,
  5466. &nand_ooblayout_lp_ops);
  5467. break;
  5468. }
  5469. WARN(1, "No oob scheme defined for oobsize %d\n",
  5470. mtd->oobsize);
  5471. ret = -EINVAL;
  5472. goto err_nand_manuf_cleanup;
  5473. }
  5474. }

	/*
	 * Check the ECC mode. If 3 byte/512 byte hardware ECC is selected and
	 * we have a 256 byte page size, fall back to software ECC.
	 */
	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handler */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;
		/* fall through */

	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		/* fall through */

	case NAND_ECC_HW_SYNDROME:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;
		/* fall through */

	case NAND_ECC_SOFT:
		ret = nand_set_ecc_soft_ops(mtd);
		if (ret) {
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		break;

	case NAND_ECC_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}
	ecc->total = ecc->steps * ecc->bytes;
	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}
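
	/*
	 * Worked example of the two checks above (illustrative figures only):
	 * with a 2048 byte page, a 512 byte ECC step and 7 ECC bytes per
	 * step, ecc->steps = 2048 / 512 = 4 and ecc->total = 4 * 7 = 28,
	 * which must fit into the OOB area (e.g. 64 bytes).
	 */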

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(mtd))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			mtd->name);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
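
	/*
	 * Illustrative example of the subpage computation above: an SLC chip
	 * with a 2048 byte page and ecc->steps = 4 gets subpage_sft = 2, so
	 * chip->subpagesize = 2048 >> 2 = 512 bytes.
	 */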

	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */
	chip->pagebuf = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
						  MTD_CAP_NANDFLASH;
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = NULL;
	mtd->_unlock = NULL;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nand_max_bad_blocks;
	mtd->writebufsize = mtd->writesize;

	/*
	 * Initialize bitflip_threshold to its default prior to the scan_bbt()
	 * call. scan_bbt() might invoke mtd_read(), thus bitflip_threshold
	 * must be properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
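
	/*
	 * Example of the default above (illustrative only): with
	 * mtd->ecc_strength = 8, the threshold becomes
	 * DIV_ROUND_UP(8 * 3, 4) = 6, i.e. three quarters of the ECC
	 * strength, rounded up.
	 */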

	/* Initialize the ->data_interface field. */
	ret = nand_init_data_interface(chip);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < chip->numchips; i++) {
		ret = nand_setup_data_interface(chip, i);
		if (ret)
			goto err_nand_manuf_cleanup;
	}

	/* Check if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nand_manuf_cleanup;

	return 0;

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}

static int nand_attach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->attach_chip)
		return chip->controller->ops->attach_chip(chip);

	return 0;
}

static void nand_detach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->detach_chip)
		chip->controller->ops->detach_chip(chip);
}
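
/*
 * Sketch of how a controller driver typically provides these hooks
 * (hypothetical foo_* names; assumes chip->controller already points at an
 * initialized controller whose ops expose the attach_chip/detach_chip
 * members used above):
 *
 *	static const struct nand_controller_ops foo_controller_ops = {
 *		.attach_chip = foo_attach_chip,
 *		.detach_chip = foo_detach_chip,
 *	};
 *
 *	chip->controller->ops = &foo_controller_ops;
 */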

/**
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @ids: optional flash IDs table
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
 */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
		       struct nand_flash_dev *ids)
{
	int ret;

	if (!maxchips)
		return -EINVAL;

	ret = nand_scan_ident(chip, maxchips, ids);
	if (ret)
		return ret;

	ret = nand_attach(chip);
	if (ret)
		goto cleanup_ident;

	ret = nand_scan_tail(chip);
	if (ret)
		goto detach_chip;

	return 0;

detach_chip:
	nand_detach(chip);
cleanup_ident:
	nand_scan_ident_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);
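
/*
 * Minimal usage sketch for a controller driver's probe path (hypothetical,
 * error handling trimmed; assumes chip/controller setup has already been
 * done and that the driver calls nand_release() from its remove path):
 *
 *	ret = nand_scan_with_ids(chip, 1, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
 *	if (ret)
 *		nand_cleanup(chip);
 */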

/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
	if (chip->ecc.mode == NAND_ECC_SOFT &&
	    chip->ecc.algo == NAND_ECC_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	/* Free bad block table memory */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_cleanup);

/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 *		  held by the NAND device
 * @chip: NAND chip object
 */
void nand_release(struct nand_chip *chip)
{
	mtd_device_unregister(nand_to_mtd(chip));
	nand_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_release);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");