super.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"       /* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
                                   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
                                 const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
                                            unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ext2",
        .mount          = ext4_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif

static struct file_system_type ext3_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ext3",
        .mount          = ext4_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
                                 struct ext4_super_block *es)
{
        if (!ext4_has_feature_metadata_csum(sb))
                return 1;

        return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

static __le32 ext4_superblock_csum(struct super_block *sb,
                                   struct ext4_super_block *es)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int offset = offsetof(struct ext4_super_block, s_checksum);
        __u32 csum;

        csum = ext4_chksum(sbi, ~0, (char *)es, offset);

        return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
                                       struct ext4_super_block *es)
{
        if (!ext4_has_metadata_csum(sb))
                return 1;

        return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (!ext4_has_metadata_csum(sb))
                return;

        es->s_checksum = ext4_superblock_csum(sb, es);
}
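
/*
 * A minimal usage sketch (hypothetical caller, error paths elided):
 * the checksum covers every superblock byte up to, but not including,
 * s_checksum itself, so a reader verifies right after the block is
 * read and a writer refreshes the checksum just before the block is
 * written back:
 *
 *	if (!ext4_superblock_csum_verify(sb, es))
 *		return -EFSBADCRC;
 *	...
 *	ext4_superblock_csum_set(sb);
 *	mark_buffer_dirty(EXT4_SB(sb)->s_sbh);
 */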

void *ext4_kvmalloc(size_t size, gfp_t flags)
{
        void *ret;

        ret = kmalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags, PAGE_KERNEL);
        return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
        void *ret;

        ret = kzalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
        return ret;
}
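
/*
 * A minimal usage sketch (hypothetical caller): the helpers above try
 * a physically contiguous kmalloc() first and quietly fall back to
 * vmalloc() when that fails, so they suit allocations whose size
 * scales with the filesystem.  Either result is released with
 * kvfree(), which dispatches to the matching free routine:
 *
 *	struct buffer_head **tbl;
 *
 *	tbl = ext4_kvzalloc(ngroups * sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */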

ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_block_bitmap_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_inode_bitmap_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
                              struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_inode_table_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_free_blocks_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
                             struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_free_inodes_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
                           struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_used_dirs_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_itable_unused_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
                          struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
                                  struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
                          struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
                        struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
                            struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
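
/*
 * A minimal usage sketch (hypothetical caller): the accessors above
 * hide the on-disk split of wide values across _lo/_hi descriptor
 * fields; the _hi half is only meaningful when the group descriptor
 * size is at least EXT4_MIN_DESC_SIZE_64BIT.  Callers read and write
 * whole values and never touch the halves directly:
 *
 *	ext4_fsblk_t bitmap_blk = ext4_block_bitmap(sb, gdp);
 *	__u32 free = ext4_free_inodes_count(sb, gdp);
 *
 *	ext4_free_inodes_set(sb, gdp, free - 1);
 */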

static void __save_error_info(struct super_block *sb, const char *func,
                              unsigned int line)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
        if (bdev_read_only(sb->s_bdev))
                return;
        es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
        es->s_last_error_time = cpu_to_le32(get_seconds());
        strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
        es->s_last_error_line = cpu_to_le32(line);
        if (!es->s_first_error_time) {
                es->s_first_error_time = es->s_last_error_time;
                strncpy(es->s_first_error_func, func,
                        sizeof(es->s_first_error_func));
                es->s_first_error_line = cpu_to_le32(line);
                es->s_first_error_ino = es->s_last_error_ino;
                es->s_first_error_block = es->s_last_error_block;
        }
        /*
         * Start the daily error reporting function if it hasn't been
         * started already
         */
        if (!es->s_error_count)
                mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
        le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
                            unsigned int line)
{
        __save_error_info(sb, func, line);
        ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
        struct inode *bd_inode = sb->s_bdev->bd_inode;
        struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

        return bdi->dev == NULL;
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
        struct super_block *sb = journal->j_private;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int error = is_journal_aborted(journal);
        struct ext4_journal_cb_entry *jce;

        BUG_ON(txn->t_state == T_FINISHED);

        ext4_process_freed_data(sb, txn->t_tid);

        spin_lock(&sbi->s_md_lock);
        while (!list_empty(&txn->t_private_list)) {
                jce = list_entry(txn->t_private_list.next,
                                 struct ext4_journal_cb_entry, jce_list);
                list_del_init(&jce->jce_list);
                spin_unlock(&sbi->s_md_lock);
                jce->jce_func(sb, jce, error);
                spin_lock(&sbi->s_md_lock);
        }
        spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */
static void ext4_handle_error(struct super_block *sb)
{
        if (sb_rdonly(sb))
                return;

        if (!test_opt(sb, ERRORS_CONT)) {
                journal_t *journal = EXT4_SB(sb)->s_journal;

                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                if (journal)
                        jbd2_journal_abort(journal, -EIO);
        }
        if (test_opt(sb, ERRORS_RO)) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= SB_RDONLY;
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
                panic("EXT4-fs (device %s): panic forced after error\n",
                      sb->s_id);
        }
}

#define ext4_error_ratelimit(sb)                                        \
                ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),     \
                             "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        trace_ext4_error(sb, function, line);
        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT
                       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
                       sb->s_id, function, line, current->comm, &vaf);
                va_end(args);
        }
        save_error_info(sb, function, line);
        ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
                        unsigned int line, ext4_fsblk_t block,
                        const char *fmt, ...)
{
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return;

        trace_ext4_error(inode->i_sb, function, line);
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        es->s_last_error_block = cpu_to_le64(block);
        if (ext4_error_ratelimit(inode->i_sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                if (block)
                        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
                               "inode #%lu: block %llu: comm %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               block, current->comm, &vaf);
                else
                        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
                               "inode #%lu: comm %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               current->comm, &vaf);
                va_end(args);
        }
        save_error_info(inode->i_sb, function, line);
        ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
                       unsigned int line, ext4_fsblk_t block,
                       const char *fmt, ...)
{
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es;
        struct inode *inode = file_inode(file);
        char pathname[80], *path;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return;

        trace_ext4_error(inode->i_sb, function, line);
        es = EXT4_SB(inode->i_sb)->s_es;
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        if (ext4_error_ratelimit(inode->i_sb)) {
                path = file_path(file, pathname, sizeof(pathname));
                if (IS_ERR(path))
                        path = "(unknown)";
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                if (block)
                        printk(KERN_CRIT
                               "EXT4-fs error (device %s): %s:%d: inode #%lu: "
                               "block %llu: comm %s: path %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               block, current->comm, path, &vaf);
                else
                        printk(KERN_CRIT
                               "EXT4-fs error (device %s): %s:%d: inode #%lu: "
                               "comm %s: path %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               current->comm, path, &vaf);
                va_end(args);
        }
        save_error_info(inode->i_sb, function, line);
        ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
                              char nbuf[16])
{
        char *errstr = NULL;

        switch (errno) {
        case -EFSCORRUPTED:
                errstr = "Corrupt filesystem";
                break;
        case -EFSBADCRC:
                errstr = "Filesystem failed CRC";
                break;
        case -EIO:
                errstr = "IO failure";
                break;
        case -ENOMEM:
                errstr = "Out of memory";
                break;
        case -EROFS:
                if (!sb || (EXT4_SB(sb)->s_journal &&
                            EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
                        errstr = "Journal has aborted";
                else
                        errstr = "Readonly filesystem";
                break;
        default:
                /* If the caller passed in an extra buffer for unknown
                 * errors, textualise them now.  Else we just return
                 * NULL. */
                if (nbuf) {
                        /* Check for truncated error codes... */
                        if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
                                errstr = nbuf;
                }
                break;
        }

        return errstr;
}
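
/*
 * A minimal usage sketch (hypothetical caller): known errnos map to
 * static strings, while unknown codes are textualised into the
 * caller-supplied buffer, which only needs room for "error %d":
 *
 *	char nbuf[16];
 *	const char *msg = ext4_decode_error(sb, -EIO, nbuf);
 *
 * Here msg points to "IO failure".
 */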

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */
void __ext4_std_error(struct super_block *sb, const char *function,
                      unsigned int line, int errno)
{
        char nbuf[16];
        const char *errstr;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        /* Special case: if the error is EROFS, and we're not already
         * inside a transaction, then there's really no point in logging
         * an error. */
        if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
                return;

        if (ext4_error_ratelimit(sb)) {
                errstr = ext4_decode_error(sb, errno, nbuf);
                printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
                       sb->s_id, function, line, errstr);
        }

        save_error_info(sb, function, line);
        ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */
void __ext4_abort(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        save_error_info(sb, function, line);
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
               sb->s_id, function, line, &vaf);
        va_end(args);

        if (sb_rdonly(sb) == 0) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= SB_RDONLY;
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
                panic("EXT4-fs panic from previous error\n");
        }
}

void __ext4_msg(struct super_block *sb,
                const char *prefix, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
        va_end(args);
}
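
/*
 * A minimal usage sketch (hypothetical message): __ext4_msg() is
 * normally reached through the ext4_msg() wrapper, which passes a
 * printk level as the prefix; the ratelimit check above silently
 * drops message floods:
 *
 *	ext4_msg(sb, KERN_INFO, "errors seen so far: %u",
 *		 le32_to_cpu(es->s_error_count));
 */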

#define ext4_warning_ratelimit(sb)                                      \
                ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \
                             "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
                    unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
               sb->s_id, function, line, &vaf);
        va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
                          unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(inode->i_sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
               "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
               function, line, inode->i_ino, current->comm, &vaf);
        va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
                             struct super_block *sb, ext4_group_t grp,
                             unsigned long ino, ext4_fsblk_t block,
                             const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
        struct va_format vaf;
        va_list args;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        trace_ext4_error(sb, function, line);
        es->s_last_error_ino = cpu_to_le32(ino);
        es->s_last_error_block = cpu_to_le64(block);
        __save_error_info(sb, function, line);

        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
                       sb->s_id, function, line, grp);
                if (ino)
                        printk(KERN_CONT "inode %lu: ", ino);
                if (block)
                        printk(KERN_CONT "block %llu:",
                               (unsigned long long) block);
                printk(KERN_CONT "%pV\n", &vaf);
                va_end(args);
        }

        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
                return;
        }

        ext4_unlock_group(sb, grp);
        ext4_commit_super(sb, 1);
        ext4_handle_error(sb);
        /*
         * We only get here in the ERRORS_RO case; relocking the group
         * may be dangerous, but nothing bad will happen since the
         * filesystem will have already been marked read/only and the
         * journal has been aborted.  We return 1 as a hint to callers
         * who might want to use the return value from
         * ext4_grp_locked_error() to distinguish between the
         * ERRORS_CONT and ERRORS_RO case, and perhaps return more
         * aggressively from the ext4 function in question, with a
         * more appropriate error code.
         */
        ext4_lock_group(sb, grp);
        return;
}

void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
                                      ext4_group_t group,
                                      unsigned int flags)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

        if ((flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) &&
            !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) {
                percpu_counter_sub(&sbi->s_freeclusters_counter,
                                   grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
                        &grp->bb_state);
        }

        if ((flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) &&
            !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
                if (gdp) {
                        int count;

                        count = ext4_free_inodes_count(sb, gdp);
                        percpu_counter_sub(&sbi->s_freeinodes_counter,
                                           count);
                }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
                        &grp->bb_state);
        }
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
                return;

        ext4_warning(sb,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT4_DYNAMIC_REV);

        es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
        es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
        es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
        /* leave es->s_feature_*compat flags alone */
        /* es->s_uuid will be set by e2fsck if empty */

        /*
         * The rest of the superblock fields should be zero, and if not it
         * means they are likely already in use, so leave them alone.  We
         * can leave it up to e2fsck to clean up any inconsistencies there.
         */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
        struct block_device *bdev;
        char b[BDEVNAME_SIZE];

        bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
        if (IS_ERR(bdev))
                goto fail;
        return bdev;

fail:
        ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
                 __bdevname(dev, b), PTR_ERR(bdev));
        return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
        struct block_device *bdev;

        bdev = sbi->journal_bdev;
        if (bdev) {
                ext4_blkdev_put(bdev);
                sbi->journal_bdev = NULL;
        }
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
        return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
        struct list_head *l;

        ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
                 le32_to_cpu(sbi->s_es->s_last_orphan));

        printk(KERN_ERR "sb_info orphan list:\n");
        list_for_each(l, &sbi->s_orphan) {
                struct inode *inode = orphan_list_entry(l);

                printk(KERN_ERR "  "
                       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
                       inode->i_sb->s_id, inode->i_ino, inode,
                       inode->i_mode, inode->i_nlink,
                       NEXT_ORPHAN(inode));
        }
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
        int type;

        /* Use our quota_off function to clear inode flags etc. */
        for (type = 0; type < EXT4_MAXQUOTAS; type++)
                ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

static void ext4_put_super(struct super_block *sb)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        int aborted = 0;
        int i, err;

        ext4_unregister_li_request(sb);
        ext4_quota_off_umount(sb);

        destroy_workqueue(sbi->rsv_conversion_wq);

        if (sbi->s_journal) {
                aborted = is_journal_aborted(sbi->s_journal);
                err = jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
                if ((err < 0) && !aborted)
                        ext4_abort(sb, "Couldn't clean up the journal");
        }

        ext4_unregister_sysfs(sb);
        ext4_es_unregister_shrinker(sbi);
        del_timer_sync(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
        ext4_ext_release(sb);

        if (!sb_rdonly(sb) && !aborted) {
                ext4_clear_feature_journal_needs_recovery(sb);
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
        if (!sb_rdonly(sb))
                ext4_commit_super(sb, 1);

        for (i = 0; i < sbi->s_gdb_count; i++)
                brelse(sbi->s_group_desc[i]);
        kvfree(sbi->s_group_desc);
        kvfree(sbi->s_flex_groups);
        percpu_counter_destroy(&sbi->s_freeclusters_counter);
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
        percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                kfree(sbi->s_qf_names[i]);
#endif

        /* Debugging code just in case the in-memory inode orphan list
         * isn't empty.  The on-disk one can be non-empty if we've
         * detected an error and taken the fs readonly, but the
         * in-memory list had better be clean by this point. */
        if (!list_empty(&sbi->s_orphan))
                dump_orphan_list(sb, sbi);
        J_ASSERT(list_empty(&sbi->s_orphan));

        sync_blockdev(sb->s_bdev);
        invalidate_bdev(sb->s_bdev);
        if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
                /*
                 * Invalidate the journal device's buffers.  We don't want
                 * them floating about in memory - the physical journal
                 * device may have been hotswapped, and it breaks the
                 * `ro-after' testing code.
                 */
                sync_blockdev(sbi->journal_bdev);
                invalidate_bdev(sbi->journal_bdev);
                ext4_blkdev_remove(sbi);
        }
  844. if (sbi->s_ea_inode_cache) {
  845. ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
  846. sbi->s_ea_inode_cache = NULL;
  847. }
  848. if (sbi->s_ea_block_cache) {
  849. ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
  850. sbi->s_ea_block_cache = NULL;
  851. }
  852. if (sbi->s_mmp_tsk)
  853. kthread_stop(sbi->s_mmp_tsk);
  854. brelse(sbi->s_sbh);
  855. sb->s_fs_info = NULL;
  856. /*
  857. * Now that we are completely done shutting down the
  858. * superblock, we need to actually destroy the kobject.
  859. */
  860. kobject_put(&sbi->s_kobj);
  861. wait_for_completion(&sbi->s_kobj_unregister);
  862. if (sbi->s_chksum_driver)
  863. crypto_free_shash(sbi->s_chksum_driver);
  864. kfree(sbi->s_blockgroup_lock);
  865. fs_put_dax(sbi->s_daxdev);
  866. kfree(sbi);
  867. }
  868. static struct kmem_cache *ext4_inode_cachep;
  869. /*
  870. * Called inside transaction, so use GFP_NOFS
  871. */
  872. static struct inode *ext4_alloc_inode(struct super_block *sb)
  873. {
  874. struct ext4_inode_info *ei;
  875. ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
  876. if (!ei)
  877. return NULL;
  878. inode_set_iversion(&ei->vfs_inode, 1);
  879. spin_lock_init(&ei->i_raw_lock);
  880. INIT_LIST_HEAD(&ei->i_prealloc_list);
  881. spin_lock_init(&ei->i_prealloc_lock);
  882. ext4_es_init_tree(&ei->i_es_tree);
  883. rwlock_init(&ei->i_es_lock);
  884. INIT_LIST_HEAD(&ei->i_es_list);
  885. ei->i_es_all_nr = 0;
  886. ei->i_es_shk_nr = 0;
  887. ei->i_es_shrink_lblk = 0;
  888. ei->i_reserved_data_blocks = 0;
  889. ei->i_da_metadata_calc_len = 0;
  890. ei->i_da_metadata_calc_last_lblock = 0;
  891. spin_lock_init(&(ei->i_block_reservation_lock));
  892. #ifdef CONFIG_QUOTA
  893. ei->i_reserved_quota = 0;
  894. memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
  895. #endif
  896. ei->jinode = NULL;
  897. INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
  898. spin_lock_init(&ei->i_completed_io_lock);
  899. ei->i_sync_tid = 0;
  900. ei->i_datasync_tid = 0;
  901. atomic_set(&ei->i_unwritten, 0);
  902. INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
  903. return &ei->vfs_inode;
  904. }
static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}
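
/* RCU callback: actually free the in-core inode once all readers are done. */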
static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
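
/*
 * Final sanity check before the inode is freed: it must no longer be on
 * the orphan list.  If it is, dump the in-core inode for debugging, then
 * hand the inode to RCU for freeing.
 */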
static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}
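
/*
 * Slab constructor: runs once when a cache object is first created, so it
 * only initializes state that must survive alloc/free cycles (locks and
 * list heads).
 */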
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}
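
/*
 * Called from the VFS eviction path: release everything the inode still
 * references (buffers, quota, preallocations, extent status entries, the
 * jbd2 inode, encryption info) before the in-core inode goes away.
 */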
void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
}
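
/*
 * NFS export helper: turn an (inode number, generation) pair from a file
 * handle back into an inode, rejecting obviously invalid or stale handles.
 */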
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext4_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget_normal(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page,
						wait & ~__GFP_DIRECT_RECLAIM);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
			    void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */
	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
			 */
			ext4_set_inode_flags(inode);
		}
		return res;
	}

	res = dquot_initialize(inode);
	if (res)
		return res;
retry:
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		/*
		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
		 * S_DAX may be disabled
		 */
		ext4_set_inode_flags(inode);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}

static bool ext4_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.dummy_context		= ext4_dummy_context,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= EXT4_NAME_LEN,
};
#endif

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"},	/* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};
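
/*
 * Peel a leading "sb=N" off the mount options string, returning the
 * requested superblock location (or 1, the default, if none was given
 * or the value was malformed); *data is advanced past the parsed option.
 */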
static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t sb_block;
	char *options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
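/*
 * Remember the journaled quota file name for the given quota type.
 * Changing it is refused while quota is already loaded, and the option
 * is ignored entirely when the QUOTA feature manages quota files itself.
 */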
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;
	int ret = -1;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 1;
}
#endif

#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
	 MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_err, 0, 0}
};
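
/*
 * Apply a single parsed mount option token to the superblock.  Returns 1
 * on success (or when the option is deliberately ignored) and -1 on error.
 */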
static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
		return 1;
	case Opt_i_version:
		sb->s_flags |= SB_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= SB_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~SB_LAZYTIME;
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_debug_want_extra_isize) {
		sbi->s_want_extra_isize = arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mode enabled");
#else
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mount option ignored");
#endif
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
		ext4_msg(sb, KERN_WARNING,
			 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		sbi->s_mount_opt |= m->mount_opt;
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (arg != 0)
			sbi->s_mount_opt |= m->mount_opt;
		else
			sbi->s_mount_opt &= ~m->mount_opt;
	}
	return 1;
}
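
/*
 * Walk the comma-separated mount options string, dispatching each token to
 * handle_mount_opt(), then cross-check option combinations (quota setup,
 * dioread_nolock vs. block size).  Returns 1 on success, 0 on failure.
 */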
static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

		if (blocksize < PAGE_SIZE) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "dioread_nolock if block size != PAGE_SIZE");
			return 0;
		}
	}
	return 1;
}

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
#endif
}
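
/* Map an Opt_* token back to its bare (argument-less) option name. */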
static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR))
			continue;
		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & SB_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
			(sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}
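
/*
 * Final mount-time setup: warn when running e2fsck is advisable, bump the
 * mount count, refresh the mount time, and commit the updated superblock
 * back to disk (unless mounting read-only).
 */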
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (le32_to_cpu(es->s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	err = ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return err;
}
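
/*
 * Make sure the in-memory flex_groups array is large enough to cover
 * ngroup block groups, reallocating it (and copying the old contents)
 * when the filesystem grows.
 */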
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups *new_groups;
	int size;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
	new_groups = kvzalloc(size, GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
			 size / (int) sizeof(struct flex_groups));
		return -ENOMEM;
	}

	if (sbi->s_flex_groups) {
		memcpy(new_groups, sbi->s_flex_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups)));
		kvfree(sbi->s_flex_groups);
	}
	sbi->s_flex_groups = new_groups;
	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
	return 0;
}

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		atomic_add(ext4_free_inodes_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].used_dirs);
	}

	return 1;
failed:
	return 0;
}
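
/*
 * Compute the group descriptor checksum: crc32c when the metadata_csum
 * feature is enabled, otherwise the older uuid-seeded crc16.
 */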
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		    !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & SB_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~SB_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old-style quota files */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret)
				ext4_std_error(inode->i_sb, ret);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}
  2320. /*
  2321. * Maximal extent format file size.
  2322. * Resulting logical blkno at s_maxbytes must fit in our on-disk
  2323. * extent format containers, within a sector_t, and within i_blocks
  2324. * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
  2325. * so that won't be a limiting factor.
  2326. *
  2327. * However there is other limiting factor. We do store extents in the form
  2328. * of starting block and length, hence the resulting length of the extent
  2329. * covering maximum file size must fit into on-disk format containers as
  2330. * well. Given that length is always by 1 unit bigger than max unit (because
  2331. * we count 0 as well) we have to lower the s_maxbytes by one fs block.
  2332. *
  2333. * Note, this does *not* consider any metadata overhead for vfs i_blocks.
  2334. */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
    loff_t res;
    loff_t upper_limit = MAX_LFS_FILESIZE;

    /* small i_blocks in vfs inode? */
    if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
        /*
         * !has_huge_files or CONFIG_LBDAF not enabled implies that
         * the inode i_blocks field counts 512-byte sectors and is
         * limited to 2^32 of them (32 == size of vfs inode
         * i_blocks * 8).
         */
        upper_limit = (1LL << 32) - 1;

        /* total blocks in file system block size */
        upper_limit >>= (blkbits - 9);
        upper_limit <<= blkbits;
    }

    /*
     * 32-bit extent-start container, ee_block.  We lower the maxbytes
     * by one fs block, so ee_len can cover the extent of maximum file
     * size
     */
    res = (1LL << 32) - 1;
    res <<= blkbits;

    /* Sanity check against vm- & vfs- imposed limits */
    if (res > upper_limit)
        res = upper_limit;

    return res;
}
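/*
 * Editor's note (illustrative arithmetic, not in the upstream source):
 * with 4 KiB blocks (blkbits == 12) and huge_file support on a 64-bit
 * kernel, res = (2^32 - 1) << 12 = 0xFFFFFFFF000, i.e. 16 TiB minus
 * one 4 KiB block.  Without huge_file, upper_limit is first reduced to
 * (2^32 - 1) >> 3 = 2^29 - 1 blocks, so the cap drops to roughly 2 TiB
 * minus one block.
 */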
/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
    loff_t res = EXT4_NDIR_BLOCKS;
    int meta_blocks;
    loff_t upper_limit;

    /*
     * This is calculated to be the largest file size for a dense, block
     * mapped file such that the file's total number of 512-byte sectors,
     * including data and all indirect blocks, does not exceed (2^48 - 1).
     *
     * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
     * number of 512-byte sectors of the file.
     */
    if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
        /*
         * !has_huge_files or CONFIG_LBDAF not enabled implies that
         * the inode i_blocks field represents total file blocks in
         * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
         */
        upper_limit = (1LL << 32) - 1;

        /* total blocks in file system block size */
        upper_limit >>= (bits - 9);

    } else {
        /*
         * We use the 48-bit ext4_inode i_blocks.
         * With EXT4_HUGE_FILE_FL set, i_blocks
         * represents the total number of blocks in
         * file system block size
         */
        upper_limit = (1LL << 48) - 1;

    }

    /* indirect blocks */
    meta_blocks = 1;
    /* double indirect blocks */
    meta_blocks += 1 + (1LL << (bits-2));
    /* triple indirect blocks */
    meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

    upper_limit -= meta_blocks;
    upper_limit <<= bits;

    res += 1LL << (bits-2);
    res += 1LL << (2*(bits-2));
    res += 1LL << (3*(bits-2));
    res <<= bits;
    if (res > upper_limit)
        res = upper_limit;

    if (res > MAX_LFS_FILESIZE)
        res = MAX_LFS_FILESIZE;

    return res;
}
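/*
 * Editor's note (illustrative arithmetic, not in the upstream source):
 * with 4 KiB blocks (bits == 12), each indirect block holds
 * 2^(12-2) = 1024 block pointers, so a block-mapped file addresses
 * 12 + 1024 + 1024^2 + 1024^3 blocks -- just over 2^30 blocks, about
 * 4 TiB of data -- before the i_blocks sector limit is applied.
 */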
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
                   ext4_fsblk_t logical_sb_block, int nr)
{
    struct ext4_sb_info *sbi = EXT4_SB(sb);
    ext4_group_t bg, first_meta_bg;
    int has_super = 0;

    first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

    if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
        return logical_sb_block + nr + 1;
    bg = sbi->s_desc_per_block * nr;
    if (ext4_bg_has_super(sb, bg))
        has_super = 1;

    /*
     * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
     * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
     * on modern mke2fs or blksize > 1k on older mke2fs) then we must
     * compensate.
     */
    if (sb->s_blocksize == 1024 && nr == 0 &&
        le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
        has_super++;
    return (has_super + ext4_group_first_block_no(sb, bg));
}
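/*
 * Editor's note (illustrative, not in the upstream source): on a
 * non-meta_bg filesystem the group descriptors simply follow the
 * superblock, so descriptor block nr lives at logical_sb_block + nr + 1.
 * With meta_bg and 4 KiB blocks, s_desc_per_block is 4096 / 64 = 64
 * for 64-byte descriptors (128 for 32-byte ones), so descriptor block
 * nr is instead fetched from the start of group s_desc_per_block * nr.
 */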
/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If a stripe size was specified via mount option, use that value.
 * Otherwise, if the RAID stripe width recorded in the superblock is
 * usable (non-zero and no larger than the blocks per group), use it;
 * failing that, fall back to the RAID stride.  The allocator needs the
 * result to be less than the blocks per group, so return 0 if no
 * candidate satisfies that.
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
    unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
    unsigned long stripe_width =
            le32_to_cpu(sbi->s_es->s_raid_stripe_width);
    int ret;

    if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
        ret = sbi->s_stripe;
    else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
        ret = stripe_width;
    else if (stride && stride <= sbi->s_blocks_per_group)
        ret = stride;
    else
        ret = 0;

    /*
     * If the stripe width is 1, this makes no sense and
     * we set it to 0 to turn off stripe handling code.
     */
    if (ret <= 1)
        ret = 0;

    return ret;
}
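/*
 * Editor's note (illustrative, not in the upstream source): with
 * s_stripe == 0 (no mount option), s_raid_stripe_width == 256,
 * s_raid_stride == 64 and 32768 blocks per group, this returns 256;
 * had the recorded stripe width been 1, the final check would reject
 * it and disable the stripe handling code entirely (return 0).
 */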
/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
    if (ext4_has_unknown_ext4_incompat_features(sb)) {
        ext4_msg(sb, KERN_ERR,
            "Couldn't mount because of "
            "unsupported optional features (%x)",
            (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
            ~EXT4_FEATURE_INCOMPAT_SUPP));
        return 0;
    }

    if (readonly)
        return 1;

    if (ext4_has_feature_readonly(sb)) {
        ext4_msg(sb, KERN_INFO, "filesystem is read-only");
        sb->s_flags |= SB_RDONLY;
        return 1;
    }

    /* Check that feature set is OK for a read-write mount */
    if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
        ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
             "unsupported optional features (%x)",
             (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
              ~EXT4_FEATURE_RO_COMPAT_SUPP));
        return 0;
    }
    /*
     * Large file size enabled file system can only be mounted
     * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
     */
    if (ext4_has_feature_huge_file(sb)) {
        if (sizeof(blkcnt_t) < sizeof(u64)) {
            ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
                 "cannot be mounted RDWR without "
                 "CONFIG_LBDAF");
            return 0;
        }
    }
    if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
        ext4_msg(sb, KERN_ERR,
             "Can't support bigalloc feature without "
             "extents feature\n");
        return 0;
    }

#ifndef CONFIG_QUOTA
    if (ext4_has_feature_quota(sb) && !readonly) {
        ext4_msg(sb, KERN_ERR,
             "Filesystem with quota feature cannot be mounted RDWR "
             "without CONFIG_QUOTA");
        return 0;
    }
    if (ext4_has_feature_project(sb) && !readonly) {
        ext4_msg(sb, KERN_ERR,
             "Filesystem with project quota feature cannot be mounted RDWR "
             "without CONFIG_QUOTA");
        return 0;
    }
#endif  /* CONFIG_QUOTA */
    return 1;
}
/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
    struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
    struct super_block *sb = sbi->s_sb;
    struct ext4_super_block *es = sbi->s_es;

    if (es->s_error_count)
        /* fsck newer than v1.41.13 is needed to clean this condition. */
        ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
             le32_to_cpu(es->s_error_count));
    if (es->s_first_error_time) {
        printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
               sb->s_id, le32_to_cpu(es->s_first_error_time),
               (int) sizeof(es->s_first_error_func),
               es->s_first_error_func,
               le32_to_cpu(es->s_first_error_line));
        if (es->s_first_error_ino)
            printk(KERN_CONT ": inode %u",
                   le32_to_cpu(es->s_first_error_ino));
        if (es->s_first_error_block)
            printk(KERN_CONT ": block %llu", (unsigned long long)
                   le64_to_cpu(es->s_first_error_block));
        printk(KERN_CONT "\n");
    }
    if (es->s_last_error_time) {
        printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
               sb->s_id, le32_to_cpu(es->s_last_error_time),
               (int) sizeof(es->s_last_error_func),
               es->s_last_error_func,
               le32_to_cpu(es->s_last_error_line));
        if (es->s_last_error_ino)
            printk(KERN_CONT ": inode %u",
                   le32_to_cpu(es->s_last_error_ino));
        if (es->s_last_error_block)
            printk(KERN_CONT ": block %llu", (unsigned long long)
                   le64_to_cpu(es->s_last_error_block));
        printk(KERN_CONT "\n");
    }
    mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
}
/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
    struct ext4_group_desc *gdp = NULL;
    ext4_group_t group, ngroups;
    struct super_block *sb;
    unsigned long timeout = 0;
    int ret = 0;

    sb = elr->lr_super;
    ngroups = EXT4_SB(sb)->s_groups_count;

    for (group = elr->lr_next_group; group < ngroups; group++) {
        gdp = ext4_get_group_desc(sb, group, NULL);
        if (!gdp) {
            ret = 1;
            break;
        }

        if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
            break;
    }

    if (group >= ngroups)
        ret = 1;

    if (!ret) {
        timeout = jiffies;
        ret = ext4_init_inode_table(sb, group,
                        elr->lr_timeout ? 0 : 1);
        if (elr->lr_timeout == 0) {
            timeout = (jiffies - timeout) *
                  elr->lr_sbi->s_li_wait_mult;
            elr->lr_timeout = timeout;
        }
        elr->lr_next_sched = jiffies + elr->lr_timeout;
        elr->lr_next_group = group + 1;
    }
    return ret;
}
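/*
 * Editor's note (illustrative, not in the upstream source): the first
 * time a request runs, the elapsed jiffies are multiplied by
 * s_li_wait_mult (EXT4_DEF_LI_WAIT_MULT, 10 by default) to derive the
 * pause between groups.  If zeroing one inode table took 2 seconds,
 * the next group is scheduled roughly 20 seconds later, keeping lazy
 * init to a small fraction of the disk bandwidth.
 */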
/*
 * Remove lr_request from the request list and free the
 * request structure.  Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
    struct ext4_sb_info *sbi;

    if (!elr)
        return;

    sbi = elr->lr_sbi;

    list_del(&elr->lr_request);
    sbi->s_li_request = NULL;
    kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
    mutex_lock(&ext4_li_mtx);
    if (!ext4_li_info) {
        mutex_unlock(&ext4_li_mtx);
        return;
    }

    mutex_lock(&ext4_li_info->li_list_mtx);
    ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
    mutex_unlock(&ext4_li_info->li_list_mtx);
    mutex_unlock(&ext4_li_mtx);
}
static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives.  It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function.  Based on that time we compute the next schedule time of
 * the request.  When walking through the list is complete, compute the
 * next wakeup time and put itself to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
    struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
    struct list_head *pos, *n;
    struct ext4_li_request *elr;
    unsigned long next_wakeup, cur;

    BUG_ON(NULL == eli);

cont_thread:
    while (true) {
        next_wakeup = MAX_JIFFY_OFFSET;

        mutex_lock(&eli->li_list_mtx);
        if (list_empty(&eli->li_request_list)) {
            mutex_unlock(&eli->li_list_mtx);
            goto exit_thread;
        }
        list_for_each_safe(pos, n, &eli->li_request_list) {
            int err = 0;
            int progress = 0;

            elr = list_entry(pos, struct ext4_li_request,
                     lr_request);

            if (time_before(jiffies, elr->lr_next_sched)) {
                if (time_before(elr->lr_next_sched, next_wakeup))
                    next_wakeup = elr->lr_next_sched;
                continue;
            }
            if (down_read_trylock(&elr->lr_super->s_umount)) {
                if (sb_start_write_trylock(elr->lr_super)) {
                    progress = 1;
                    /*
                     * We hold sb->s_umount, sb cannot
                     * be removed from the list, it is
                     * now safe to drop li_list_mtx
                     */
                    mutex_unlock(&eli->li_list_mtx);
                    err = ext4_run_li_request(elr);
                    sb_end_write(elr->lr_super);
                    mutex_lock(&eli->li_list_mtx);
                    n = pos->next;
                }
                up_read((&elr->lr_super->s_umount));
            }
            /* error, remove the lazy_init job */
            if (err) {
                ext4_remove_li_request(elr);
                continue;
            }
            if (!progress) {
                elr->lr_next_sched = jiffies +
                    (prandom_u32()
                     % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
            }
            if (time_before(elr->lr_next_sched, next_wakeup))
                next_wakeup = elr->lr_next_sched;
        }
        mutex_unlock(&eli->li_list_mtx);

        try_to_freeze();

        cur = jiffies;
        if ((time_after_eq(cur, next_wakeup)) ||
            (MAX_JIFFY_OFFSET == next_wakeup)) {
            cond_resched();
            continue;
        }

        schedule_timeout_interruptible(next_wakeup - cur);

        if (kthread_should_stop()) {
            ext4_clear_request_list();
            goto exit_thread;
        }
    }

exit_thread:
    /*
     * It looks like the request list is empty, but we need
     * to check it under the li_list_mtx lock, to prevent any
     * additions into it, and of course we should lock ext4_li_mtx
     * to atomically free the list and ext4_li_info, because at
     * this point another ext4 filesystem could be registering
     * a new one.
     */
    mutex_lock(&ext4_li_mtx);
    mutex_lock(&eli->li_list_mtx);
    if (!list_empty(&eli->li_request_list)) {
        mutex_unlock(&eli->li_list_mtx);
        mutex_unlock(&ext4_li_mtx);
        goto cont_thread;
    }
    mutex_unlock(&eli->li_list_mtx);
    kfree(ext4_li_info);
    ext4_li_info = NULL;
    mutex_unlock(&ext4_li_mtx);

    return 0;
}
static void ext4_clear_request_list(void)
{
    struct list_head *pos, *n;
    struct ext4_li_request *elr;

    mutex_lock(&ext4_li_info->li_list_mtx);
    list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
        elr = list_entry(pos, struct ext4_li_request,
                 lr_request);
        ext4_remove_li_request(elr);
    }
    mutex_unlock(&ext4_li_info->li_list_mtx);
}
static int ext4_run_lazyinit_thread(void)
{
    ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
                     ext4_li_info, "ext4lazyinit");
    if (IS_ERR(ext4_lazyinit_task)) {
        int err = PTR_ERR(ext4_lazyinit_task);
        ext4_clear_request_list();
        kfree(ext4_li_info);
        ext4_li_info = NULL;
        printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
                 "initialization thread\n",
                 err);
        return err;
    }
    ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
    return 0;
}
/*
 * Check whether it makes sense to run the itable init thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
    ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
    struct ext4_group_desc *gdp = NULL;

    for (group = 0; group < ngroups; group++) {
        gdp = ext4_get_group_desc(sb, group, NULL);
        if (!gdp)
            continue;

        if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
            break;
    }

    return group;
}
static int ext4_li_info_new(void)
{
    struct ext4_lazy_init *eli = NULL;

    eli = kzalloc(sizeof(*eli), GFP_KERNEL);
    if (!eli)
        return -ENOMEM;

    INIT_LIST_HEAD(&eli->li_request_list);
    mutex_init(&eli->li_list_mtx);

    eli->li_state |= EXT4_LAZYINIT_QUIT;

    ext4_li_info = eli;

    return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
                        ext4_group_t start)
{
    struct ext4_sb_info *sbi = EXT4_SB(sb);
    struct ext4_li_request *elr;

    elr = kzalloc(sizeof(*elr), GFP_KERNEL);
    if (!elr)
        return NULL;

    elr->lr_super = sb;
    elr->lr_sbi = sbi;
    elr->lr_next_group = start;

    /*
     * Randomize first schedule time of the request to
     * spread the inode table initialization requests
     * better.
     */
    elr->lr_next_sched = jiffies + (prandom_u32() %
                (EXT4_DEF_LI_MAX_START_DELAY * HZ));
    return elr;
}
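/*
 * Editor's note (illustrative, not in the upstream source): the modulo
 * above spreads the first run over a window of
 * EXT4_DEF_LI_MAX_START_DELAY seconds (5 in this kernel), so several
 * freshly mounted filesystems do not all start zeroing inode tables
 * at the same instant.
 */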
int ext4_register_li_request(struct super_block *sb,
                 ext4_group_t first_not_zeroed)
{
    struct ext4_sb_info *sbi = EXT4_SB(sb);
    struct ext4_li_request *elr = NULL;
    ext4_group_t ngroups = sbi->s_groups_count;
    int ret = 0;

    mutex_lock(&ext4_li_mtx);
    if (sbi->s_li_request != NULL) {
        /*
         * Reset timeout so it can be computed again, because
         * s_li_wait_mult might have changed.
         */
        sbi->s_li_request->lr_timeout = 0;
        goto out;
    }

    if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
        !test_opt(sb, INIT_INODE_TABLE))
        goto out;

    elr = ext4_li_request_new(sb, first_not_zeroed);
    if (!elr) {
        ret = -ENOMEM;
        goto out;
    }

    if (NULL == ext4_li_info) {
        ret = ext4_li_info_new();
        if (ret)
            goto out;
    }

    mutex_lock(&ext4_li_info->li_list_mtx);
    list_add(&elr->lr_request, &ext4_li_info->li_request_list);
    mutex_unlock(&ext4_li_info->li_list_mtx);

    sbi->s_li_request = elr;
    /*
     * set elr to NULL here since it has been inserted into
     * the request_list and the removal and free of it is
     * handled by ext4_clear_request_list from now on.
     */
    elr = NULL;

    if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
        ret = ext4_run_lazyinit_thread();
        if (ret)
            goto out;
    }
out:
    mutex_unlock(&ext4_li_mtx);
    if (ret)
        kfree(elr);
    return ret;
}
/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
    /*
     * If thread exited earlier
     * there's nothing to be done.
     */
    if (!ext4_li_info || !ext4_lazyinit_task)
        return;

    kthread_stop(ext4_lazyinit_task);
}
static int set_journal_csum_feature_set(struct super_block *sb)
{
    int ret = 1;
    int compat, incompat;
    struct ext4_sb_info *sbi = EXT4_SB(sb);

    if (ext4_has_metadata_csum(sb)) {
        /* journal checksum v3 */
        compat = 0;
        incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
    } else {
        /* journal checksum v1 */
        compat = JBD2_FEATURE_COMPAT_CHECKSUM;
        incompat = 0;
    }

    jbd2_journal_clear_features(sbi->s_journal,
            JBD2_FEATURE_COMPAT_CHECKSUM, 0,
            JBD2_FEATURE_INCOMPAT_CSUM_V3 |
            JBD2_FEATURE_INCOMPAT_CSUM_V2);
    if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
        ret = jbd2_journal_set_features(sbi->s_journal,
                compat, 0,
                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
                incompat);
    } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
        ret = jbd2_journal_set_features(sbi->s_journal,
                compat, 0,
                incompat);
        jbd2_journal_clear_features(sbi->s_journal, 0, 0,
                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
    } else {
        jbd2_journal_clear_features(sbi->s_journal, 0, 0,
                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
    }

    return ret;
}
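/*
 * Editor's note (summary, not in the upstream source): the net effect
 * is that metadata_csum filesystems use journal checksum v3 and
 * everything else uses v1; journal_async_commit additionally sets the
 * ASYNC_COMMIT incompat flag, journal_checksum alone clears it, and
 * with neither option both checksumming and async commit are cleared.
 */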
/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block groups can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the overhead for
 * older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
              char *buf)
{
    struct ext4_sb_info *sbi = EXT4_SB(sb);
    struct ext4_group_desc *gdp;
    ext4_fsblk_t first_block, last_block, b;
    ext4_group_t i, ngroups = ext4_get_groups_count(sb);
    int s, j, count = 0;

    if (!ext4_has_feature_bigalloc(sb))
        return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
            sbi->s_itb_per_group + 2);

    first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
        (grp * EXT4_BLOCKS_PER_GROUP(sb));
    last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
    for (i = 0; i < ngroups; i++) {
        gdp = ext4_get_group_desc(sb, i, NULL);
        b = ext4_block_bitmap(sb, gdp);
        if (b >= first_block && b <= last_block) {
            ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
            count++;
        }
        b = ext4_inode_bitmap(sb, gdp);
        if (b >= first_block && b <= last_block) {
            ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
            count++;
        }
        b = ext4_inode_table(sb, gdp);
        if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
            for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
                int c = EXT4_B2C(sbi, b - first_block);
                ext4_set_bit(c, buf);
                count++;
            }
        if (i != grp)
            continue;
        s = 0;
        if (ext4_bg_has_super(sb, grp)) {
            ext4_set_bit(s++, buf);
            count++;
        }
        j = ext4_bg_num_gdb(sb, grp);
        if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
            ext4_error(sb, "Invalid number of block group "
                   "descriptor blocks: %d", j);
            j = EXT4_BLOCKS_PER_GROUP(sb) - s;
        }
        count += j;
        for (; j > 0; j--)
            ext4_set_bit(EXT4_B2C(sbi, s++), buf);
    }
    if (!count)
        return 0;
    return EXT4_CLUSTERS_PER_GROUP(sb) -
        ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}
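/*
 * Editor's note (summary, not in the upstream source): for bigalloc,
 * buf serves as a per-cluster bitmap of group grp; every metadata
 * block (bitmaps, inode tables, superblock/GDT copies) from any group
 * that lands inside grp sets its cluster's bit, and the overhead is
 * clusters-per-group minus the bits still free.  This is how two
 * metadata blocks sharing one cluster get counted only once.
 */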
/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
    struct ext4_sb_info *sbi = EXT4_SB(sb);
    struct ext4_super_block *es = sbi->s_es;
    struct inode *j_inode;
    unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
    ext4_group_t i, ngroups = ext4_get_groups_count(sb);
    ext4_fsblk_t overhead = 0;
    char *buf = (char *) get_zeroed_page(GFP_NOFS);

    if (!buf)
        return -ENOMEM;

    /*
     * Compute the overhead (FS structures).  This is constant
     * for a given filesystem unless the number of block groups
     * changes so we cache the previous value until it does.
     */

    /*
     * All of the blocks before first_data_block are overhead
     */
    overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

    /*
     * Add the overhead found in each block group
     */
    for (i = 0; i < ngroups; i++) {
        int blks;

        blks = count_overhead(sb, i, buf);
        overhead += blks;
        if (blks)
            memset(buf, 0, PAGE_SIZE);
        cond_resched();
    }

    /*
     * Add the internal journal blocks whether the journal has been
     * loaded or not
     */
    if (sbi->s_journal && !sbi->journal_bdev)
        overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
    else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
        j_inode = ext4_get_journal_inode(sb, j_inum);
        if (j_inode) {
            j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
            overhead += EXT4_NUM_B2C(sbi, j_blocks);
            iput(j_inode);
        } else {
            ext4_msg(sb, KERN_ERR, "can't get journal size");
        }
    }
    sbi->s_overhead = overhead;
    smp_wmb();
    free_page((unsigned long) buf);
    return 0;
}
static void ext4_set_resv_clusters(struct super_block *sb)
{
    ext4_fsblk_t resv_clusters;
    struct ext4_sb_info *sbi = EXT4_SB(sb);

    /*
     * There's no need to reserve anything when we aren't using extents.
     * The space estimates are exact, there are no unwritten extents,
     * hole punching doesn't need new metadata... This is needed especially
     * to keep ext2/3 backward compatibility.
     */
    if (!ext4_has_feature_extents(sb))
        return;
    /*
     * By default we reserve 2% or 4096 clusters, whichever is smaller.
     * This should cover the situations where we cannot afford to run
     * out of space like for example punch hole, or converting
     * unwritten extents in delalloc path.  In most cases such
     * allocation would require 1, or 2 blocks, higher numbers are
     * very rare.
     */
    resv_clusters = (ext4_blocks_count(sbi->s_es) >>
             sbi->s_cluster_bits);

    do_div(resv_clusters, 50);
    resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

    atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}
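/*
 * Editor's note (illustrative arithmetic, not in the upstream source):
 * a 1 TiB filesystem with 4 KiB clusters has 268435456 clusters; 2% of
 * that is 5368709, so the reservation is capped at 4096 clusters
 * (16 MiB).  Only filesystems smaller than about 800 MiB fall below
 * the cap.
 */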
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
    struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
    char *orig_data = kstrdup(data, GFP_KERNEL);
    struct buffer_head *bh;
    struct ext4_super_block *es = NULL;
    struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
    ext4_fsblk_t block;
    ext4_fsblk_t sb_block = get_sb_block(&data);
    ext4_fsblk_t logical_sb_block;
    unsigned long offset = 0;
    unsigned long journal_devnum = 0;
    unsigned long def_mount_opts;
    struct inode *root;
    const char *descr;
    int ret = -ENOMEM;
    int blocksize, clustersize;
    unsigned int db_count;
    unsigned int i;
    int needs_recovery, has_huge_files, has_bigalloc;
    __u64 blocks_count;
    int err = 0;
    unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
    ext4_group_t first_not_zeroed;

    if ((data && !orig_data) || !sbi)
        goto out_free_base;

    sbi->s_daxdev = dax_dev;
    sbi->s_blockgroup_lock =
        kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
    if (!sbi->s_blockgroup_lock)
        goto out_free_base;

    sb->s_fs_info = sbi;
    sbi->s_sb = sb;
    sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
    sbi->s_sb_block = sb_block;
    if (sb->s_bdev->bd_part)
        sbi->s_sectors_written_start =
            part_stat_read(sb->s_bdev->bd_part, sectors[1]);

    /* Cleanup superblock name */
    strreplace(sb->s_id, '/', '!');

    /* -EINVAL is default */
    ret = -EINVAL;
    blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
    if (!blocksize) {
        ext4_msg(sb, KERN_ERR, "unable to set blocksize");
        goto out_fail;
    }
    /*
     * The ext4 superblock will not be buffer aligned for other than 1kB
     * block sizes.  We need to calculate the offset from buffer start.
     */
    if (blocksize != EXT4_MIN_BLOCK_SIZE) {
        logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
        offset = do_div(logical_sb_block, blocksize);
    } else {
        logical_sb_block = sb_block;
    }

    if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
        ext4_msg(sb, KERN_ERR, "unable to read superblock");
        goto out_fail;
    }
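    /*
     * Editor's note (illustrative, not in the upstream source): with
     * the default sb_block of 1 and a 4 KiB device block size, the
     * superblock's byte address is 1 * 1024, which do_div turns into
     * logical block 0 with offset 1024 -- the superblock sits 1 KiB
     * into the first 4 KiB block.
     */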
    /*
     * Note: s_es must be initialized as soon as possible because
     * some ext4 macro-instructions depend on its value
     */
    es = (struct ext4_super_block *) (bh->b_data + offset);
    sbi->s_es = es;
    sb->s_magic = le16_to_cpu(es->s_magic);
    if (sb->s_magic != EXT4_SUPER_MAGIC)
        goto cantfind_ext4;
    sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

    /* Warn if metadata_csum and gdt_csum are both set. */
    if (ext4_has_feature_metadata_csum(sb) &&
        ext4_has_feature_gdt_csum(sb))
        ext4_warning(sb, "metadata_csum and uninit_bg are "
                 "redundant flags; please run fsck.");

    /* Check for a known checksum algorithm */
    if (!ext4_verify_csum_type(sb, es)) {
        ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
             "unknown checksum algorithm.");
        silent = 1;
        goto cantfind_ext4;
    }

    /* Load the checksum driver */
    sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
    if (IS_ERR(sbi->s_chksum_driver)) {
        ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
        ret = PTR_ERR(sbi->s_chksum_driver);
        sbi->s_chksum_driver = NULL;
        goto failed_mount;
    }

    /* Check superblock checksum */
    if (!ext4_superblock_csum_verify(sb, es)) {
        ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
             "invalid superblock checksum.  Run e2fsck?");
        silent = 1;
        ret = -EFSBADCRC;
        goto cantfind_ext4;
    }

    /* Precompute checksum seed for all metadata */
    if (ext4_has_feature_csum_seed(sb))
        sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
    else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
        sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
                           sizeof(es->s_uuid));
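    /*
     * Editor's note (not in the upstream source): absent the csum_seed
     * feature, the seed is derived here as the crc32c of the 16-byte
     * filesystem UUID (initial value ~0), which is why changing the
     * UUID on a metadata_csum filesystem normally requires rewriting
     * every checksum.
     */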
    /* Set defaults before we parse the mount options */
    def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
    set_opt(sb, INIT_INODE_TABLE);
    if (def_mount_opts & EXT4_DEFM_DEBUG)
        set_opt(sb, DEBUG);
    if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
        set_opt(sb, GRPID);
    if (def_mount_opts & EXT4_DEFM_UID16)
        set_opt(sb, NO_UID32);
    /* xattr user namespace & acls are now defaulted on */
    set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
    set_opt(sb, POSIX_ACL);
#endif
    /* don't forget to enable journal_csum when metadata_csum is enabled. */
    if (ext4_has_metadata_csum(sb))
        set_opt(sb, JOURNAL_CHECKSUM);

    if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
        set_opt(sb, JOURNAL_DATA);
    else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
        set_opt(sb, ORDERED_DATA);
    else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
        set_opt(sb, WRITEBACK_DATA);

    if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
        set_opt(sb, ERRORS_PANIC);
    else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
        set_opt(sb, ERRORS_CONT);
    else
        set_opt(sb, ERRORS_RO);
    /* block_validity enabled by default; disable with noblock_validity */
    set_opt(sb, BLOCK_VALIDITY);
    if (def_mount_opts & EXT4_DEFM_DISCARD)
        set_opt(sb, DISCARD);

    sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
    sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
    sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
    sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
    sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

    if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
        set_opt(sb, BARRIER);

    /*
     * enable delayed allocation by default
     * Use -o nodelalloc to turn it off
     */
    if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
        ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
        set_opt(sb, DELALLOC);

    /*
     * set default s_li_wait_mult for lazyinit, for the case there is
     * no mount option specified.
     */
    sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

    if (sbi->s_es->s_mount_opts[0]) {
        char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
                          sizeof(sbi->s_es->s_mount_opts),
                          GFP_KERNEL);
        if (!s_mount_opts)
            goto failed_mount;
        if (!parse_options(s_mount_opts, sb, &journal_devnum,
                   &journal_ioprio, 0)) {
            ext4_msg(sb, KERN_WARNING,
                 "failed to parse options in superblock: %s",
                 s_mount_opts);
        }
        kfree(s_mount_opts);
    }
    sbi->s_def_mount_opt = sbi->s_mount_opt;
    if (!parse_options((char *) data, sb, &journal_devnum,
               &journal_ioprio, 0))
        goto failed_mount;
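    /*
     * Editor's note (not in the upstream source): options are layered
     * in increasing priority -- built-in defaults, then any options
     * stored in the superblock's s_mount_opts, then the options passed
     * to mount(2).  s_def_mount_opt snapshots the state before the
     * user's options so remount can tell which bits the user changed.
     */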
    if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
        printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
                "with data=journal disables delayed "
                "allocation and O_DIRECT support!\n");
        if (test_opt2(sb, EXPLICIT_DELALLOC)) {
            ext4_msg(sb, KERN_ERR, "can't mount with "
                 "both data=journal and delalloc");
            goto failed_mount;
        }
        if (test_opt(sb, DIOREAD_NOLOCK)) {
            ext4_msg(sb, KERN_ERR, "can't mount with "
                 "both data=journal and dioread_nolock");
            goto failed_mount;
        }
        if (test_opt(sb, DAX)) {
            ext4_msg(sb, KERN_ERR, "can't mount with "
                 "both data=journal and dax");
            goto failed_mount;
        }
        if (ext4_has_feature_encrypt(sb)) {
            ext4_msg(sb, KERN_WARNING,
                 "encrypted files will use data=ordered "
                 "instead of data journaling mode");
        }
        if (test_opt(sb, DELALLOC))
            clear_opt(sb, DELALLOC);
    } else {
        sb->s_iflags |= SB_I_CGROUPWB;
    }

    sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
        (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

    if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
        (ext4_has_compat_features(sb) ||
         ext4_has_ro_compat_features(sb) ||
         ext4_has_incompat_features(sb)))
        ext4_msg(sb, KERN_WARNING,
             "feature flags set on rev 0 fs, "
             "running e2fsck is recommended");

    if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
        set_opt2(sb, HURD_COMPAT);
        if (ext4_has_feature_64bit(sb)) {
            ext4_msg(sb, KERN_ERR,
                 "The Hurd can't support 64-bit file systems");
            goto failed_mount;
        }

        /*
         * ea_inode feature uses l_i_version field which is not
         * available in HURD_COMPAT mode.
         */
        if (ext4_has_feature_ea_inode(sb)) {
            ext4_msg(sb, KERN_ERR,
                 "ea_inode feature is not supported for Hurd");
            goto failed_mount;
        }
    }
    if (IS_EXT2_SB(sb)) {
        if (ext2_feature_set_ok(sb))
            ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
                 "using the ext4 subsystem");
        else {
            /*
             * If we're probing, be silent if this looks like
             * it's actually an ext[34] filesystem.
             */
            if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
                goto failed_mount;
            ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
                 "to feature incompatibilities");
            goto failed_mount;
        }
    }

    if (IS_EXT3_SB(sb)) {
        if (ext3_feature_set_ok(sb))
            ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
                 "using the ext4 subsystem");
        else {
            /*
             * If we're probing, be silent if this looks like
             * it's actually an ext4 filesystem.
             */
            if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
                goto failed_mount;
            ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
                 "to feature incompatibilities");
            goto failed_mount;
        }
    }

    /*
     * Check feature flags regardless of the revision level, since we
     * previously didn't change the revision level when setting the flags,
     * so there is a chance incompat flags are set on a rev 0 filesystem.
     */
    if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
        goto failed_mount;
    blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
    if (blocksize < EXT4_MIN_BLOCK_SIZE ||
        blocksize > EXT4_MAX_BLOCK_SIZE) {
        ext4_msg(sb, KERN_ERR,
             "Unsupported filesystem blocksize %d (%d log_block_size)",
             blocksize, le32_to_cpu(es->s_log_block_size));
        goto failed_mount;
    }
    if (le32_to_cpu(es->s_log_block_size) >
        (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
        ext4_msg(sb, KERN_ERR,
             "Invalid log block size: %u",
             le32_to_cpu(es->s_log_block_size));
        goto failed_mount;
    }
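    /*
     * Editor's note (illustrative, not in the upstream source):
     * s_log_block_size is relative to the 1 KiB BLOCK_SIZE, so a 4 KiB
     * filesystem stores 2 (1024 << 2 == 4096), and the largest legal
     * value is EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE = 6,
     * i.e. 64 KiB blocks, in this kernel.
     */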
    if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
        ext4_msg(sb, KERN_ERR,
             "Number of reserved GDT blocks insanely large: %d",
             le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
        goto failed_mount;
    }

    if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
        if (ext4_has_feature_inline_data(sb)) {
            ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
                 " that may contain inline data");
            sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
        }
        if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
            ext4_msg(sb, KERN_ERR,
                 "DAX unsupported by block device. Turning off DAX.");
            sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
        }
    }
    if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
        ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
             es->s_encryption_level);
        goto failed_mount;
    }

    if (sb->s_blocksize != blocksize) {
        /* Validate the filesystem blocksize */
        if (!sb_set_blocksize(sb, blocksize)) {
            ext4_msg(sb, KERN_ERR, "bad block size %d",
                 blocksize);
            goto failed_mount;
        }

        brelse(bh);
        logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
        offset = do_div(logical_sb_block, blocksize);
        bh = sb_bread_unmovable(sb, logical_sb_block);
        if (!bh) {
            ext4_msg(sb, KERN_ERR,
                 "Can't read superblock on 2nd try");
            goto failed_mount;
        }
        es = (struct ext4_super_block *)(bh->b_data + offset);
        sbi->s_es = es;
        if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
            ext4_msg(sb, KERN_ERR,
                 "Magic mismatch, very weird!");
            goto failed_mount;
        }
    }
    has_huge_files = ext4_has_feature_huge_file(sb);
    sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
                              has_huge_files);
    sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

    if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
        sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
        sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
    } else {
        sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
        sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
        if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
            (!is_power_of_2(sbi->s_inode_size)) ||
            (sbi->s_inode_size > blocksize)) {
            ext4_msg(sb, KERN_ERR,
                 "unsupported inode size: %d",
                 sbi->s_inode_size);
            goto failed_mount;
        }
        if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
            sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
    }

    sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
    if (ext4_has_feature_64bit(sb)) {
        if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
            sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
            !is_power_of_2(sbi->s_desc_size)) {
            ext4_msg(sb, KERN_ERR,
                 "unsupported descriptor size %lu",
                 sbi->s_desc_size);
            goto failed_mount;
        }
    } else
        sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

    sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
    sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

    sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
    if (sbi->s_inodes_per_block == 0)
        goto cantfind_ext4;
    if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
        sbi->s_inodes_per_group > blocksize * 8) {
        ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
             sbi->s_inodes_per_group);
        goto failed_mount;
    }
    sbi->s_itb_per_group = sbi->s_inodes_per_group /
                    sbi->s_inodes_per_block;
    sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
    sbi->s_sbh = bh;
    sbi->s_mount_state = le16_to_cpu(es->s_state);
    sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
    sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
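    /*
     * Editor's note (illustrative arithmetic, not in the upstream
     * source): with 4 KiB blocks and 256-byte inodes,
     * s_inodes_per_block is 4096 / 256 = 16; a typical 8192 inodes per
     * group then needs 8192 / 16 = 512 inode table blocks per group,
     * and one 4 KiB block holds 4096 / 64 = 64 descriptors with the
     * 64bit feature's 64-byte descriptors.
     */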
    for (i = 0; i < 4; i++)
        sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
    sbi->s_def_hash_version = es->s_def_hash_version;
    if (ext4_has_feature_dir_index(sb)) {
        i = le32_to_cpu(es->s_flags);
        if (i & EXT2_FLAGS_UNSIGNED_HASH)
            sbi->s_hash_unsigned = 3;
        else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
            if (!sb_rdonly(sb))
                es->s_flags |=
                    cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
            sbi->s_hash_unsigned = 3;
#else
            if (!sb_rdonly(sb))
                es->s_flags |=
                    cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
        }
    }
    /* Handle clustersize */
    clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
    has_bigalloc = ext4_has_feature_bigalloc(sb);
    if (has_bigalloc) {
        if (clustersize < blocksize) {
            ext4_msg(sb, KERN_ERR,
                 "cluster size (%d) smaller than "
                 "block size (%d)", clustersize, blocksize);
            goto failed_mount;
        }
        if (le32_to_cpu(es->s_log_cluster_size) >
            (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
            ext4_msg(sb, KERN_ERR,
                 "Invalid log cluster size: %u",
                 le32_to_cpu(es->s_log_cluster_size));
            goto failed_mount;
        }
        sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
            le32_to_cpu(es->s_log_block_size);
        sbi->s_clusters_per_group =
            le32_to_cpu(es->s_clusters_per_group);
        if (sbi->s_clusters_per_group > blocksize * 8) {
            ext4_msg(sb, KERN_ERR,
                 "#clusters per group too big: %lu",
                 sbi->s_clusters_per_group);
            goto failed_mount;
        }
        if (sbi->s_blocks_per_group !=
            (sbi->s_clusters_per_group * (clustersize / blocksize))) {
            ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
                 "clusters per group (%lu) inconsistent",
                 sbi->s_blocks_per_group,
                 sbi->s_clusters_per_group);
            goto failed_mount;
        }
    } else {
        if (clustersize != blocksize) {
            ext4_warning(sb, "fragment/cluster size (%d) != "
                     "block size (%d)", clustersize,
                     blocksize);
            clustersize = blocksize;
        }
        if (sbi->s_blocks_per_group > blocksize * 8) {
            ext4_msg(sb, KERN_ERR,
                 "#blocks per group too big: %lu",
                 sbi->s_blocks_per_group);
            goto failed_mount;
        }
        sbi->s_clusters_per_group = sbi->s_blocks_per_group;
        sbi->s_cluster_bits = 0;
    }
    sbi->s_cluster_ratio = clustersize / blocksize;

    /* Do we have standard group size of clustersize * 8 blocks? */
    if (sbi->s_blocks_per_group == clustersize << 3)
        set_opt2(sb, STD_GROUP_SIZE);
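    /*
     * Editor's note (illustrative, not in the upstream source): a
     * bigalloc filesystem with 4 KiB blocks and 64 KiB clusters has
     * s_log_cluster_size - s_log_block_size = 6 - 2 = 4, so
     * s_cluster_bits is 4 and s_cluster_ratio is 16 blocks per
     * cluster; without bigalloc both collapse to 0 and 1.
     */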
    /*
     * Test whether we have more sectors than will fit in sector_t,
     * and whether the max offset is addressable by the page cache.
     */
    err = generic_check_addressable(sb->s_blocksize_bits,
                    ext4_blocks_count(es));
    if (err) {
        ext4_msg(sb, KERN_ERR, "filesystem"
             " too large to mount safely on this system");
        if (sizeof(sector_t) < 8)
            ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
        goto failed_mount;
    }

    if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
        goto cantfind_ext4;

    /* check blocks count against device size */
    blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
    if (blocks_count && ext4_blocks_count(es) > blocks_count) {
        ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
             "exceeds size of device (%llu blocks)",
             ext4_blocks_count(es), blocks_count);
        goto failed_mount;
    }
    /*
     * It makes no sense for the first data block to be beyond the end
     * of the filesystem.
     */
    if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
        ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
             "block %u is beyond end of filesystem (%llu)",
             le32_to_cpu(es->s_first_data_block),
             ext4_blocks_count(es));
        goto failed_mount;
    }
    blocks_count = (ext4_blocks_count(es) -
            le32_to_cpu(es->s_first_data_block) +
            EXT4_BLOCKS_PER_GROUP(sb) - 1);
    do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
    if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
        ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
             "(block count %llu, first data block %u, "
             "blocks per group %lu)", sbi->s_groups_count,
             ext4_blocks_count(es),
             le32_to_cpu(es->s_first_data_block),
             EXT4_BLOCKS_PER_GROUP(sb));
        goto failed_mount;
    }
    sbi->s_groups_count = blocks_count;
    sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
            (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
    db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
           EXT4_DESC_PER_BLOCK(sb);
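    /*
     * Editor's note (illustrative arithmetic, not in the upstream
     * source): the do_div above is a ceiling division.  On a 1 TiB
     * filesystem with 4 KiB blocks (268435456 blocks, 32768 blocks per
     * group, first data block 0) it yields 8192 groups; with 64
     * descriptors per block, db_count = ceil(8192 / 64) = 128
     * descriptor blocks to read below.
     */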
    if (ext4_has_feature_meta_bg(sb)) {
        if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
            ext4_msg(sb, KERN_WARNING,
                 "first meta block group too large: %u "
                 "(group descriptor block count %u)",
                 le32_to_cpu(es->s_first_meta_bg), db_count);
            goto failed_mount;
        }
    }
    sbi->s_group_desc = kvmalloc_array(db_count,
                       sizeof(struct buffer_head *),
                       GFP_KERNEL);
    if (sbi->s_group_desc == NULL) {
        ext4_msg(sb, KERN_ERR, "not enough memory");
        ret = -ENOMEM;
        goto failed_mount;
    }

    bgl_lock_init(sbi->s_blockgroup_lock);

    /* Pre-read the descriptors into the buffer cache */
    for (i = 0; i < db_count; i++) {
        block = descriptor_loc(sb, logical_sb_block, i);
        sb_breadahead(sb, block);
    }

    for (i = 0; i < db_count; i++) {
        block = descriptor_loc(sb, logical_sb_block, i);
        sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
        if (!sbi->s_group_desc[i]) {
            ext4_msg(sb, KERN_ERR,
                 "can't read group descriptor %d", i);
            db_count = i;
            goto failed_mount2;
        }
    }
    if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
        ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
        ret = -EFSCORRUPTED;
        goto failed_mount2;
    }
    sbi->s_gdb_count = db_count;

    timer_setup(&sbi->s_err_report, print_daily_error_info, 0);

    /* Register extent status tree shrinker */
    if (ext4_es_register_shrinker(sbi))
        goto failed_mount3;

    sbi->s_stripe = ext4_get_stripe_size(sbi);
    sbi->s_extent_max_zeroout_kb = 32;

    /*
     * set up enough so that it can read an inode
     */
    sb->s_op = &ext4_sops;
    sb->s_export_op = &ext4_export_ops;
    sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
    sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA
    sb->dq_op = &ext4_quota_operations;
    if (ext4_has_feature_quota(sb))
        sb->s_qcop = &dquot_quotactl_sysfile_ops;
    else
        sb->s_qcop = &ext4_qctl_operations;
    sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
    memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

    INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
    mutex_init(&sbi->s_orphan_lock);

    sb->s_root = NULL;

    needs_recovery = (es->s_last_orphan != 0 ||
              ext4_has_feature_journal_needs_recovery(sb));

    if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
        if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
            goto failed_mount3a;
    /*
     * The first inode we look at is the journal inode.  Don't try
     * root first: it may be modified in the journal!
     */
    if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
        err = ext4_load_journal(sb, es, journal_devnum);
        if (err)
            goto failed_mount3a;
    } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
           ext4_has_feature_journal_needs_recovery(sb)) {
        ext4_msg(sb, KERN_ERR, "required journal recovery "
             "suppressed and not mounted read-only");
        goto failed_mount_wq;
    } else {
        /* Nojournal mode, all journal mount options are illegal */
        if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
            ext4_msg(sb, KERN_ERR, "can't mount with "
                 "journal_checksum, fs mounted w/o journal");
            goto failed_mount_wq;
        }
        if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
            ext4_msg(sb, KERN_ERR, "can't mount with "
                 "journal_async_commit, fs mounted w/o journal");
            goto failed_mount_wq;
        }
        if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
            ext4_msg(sb, KERN_ERR, "can't mount with "
                 "commit=%lu, fs mounted w/o journal",
                 sbi->s_commit_interval / HZ);
            goto failed_mount_wq;
        }
        if (EXT4_MOUNT_DATA_FLAGS &
            (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
            ext4_msg(sb, KERN_ERR, "can't mount with "
                 "data=, fs mounted w/o journal");
            goto failed_mount_wq;
        }
        sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
        clear_opt(sb, JOURNAL_CHECKSUM);
        clear_opt(sb, DATA_FLAGS);
        sbi->s_journal = NULL;
        needs_recovery = 0;
        goto no_journal;
    }
    if (ext4_has_feature_64bit(sb) &&
        !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
                       JBD2_FEATURE_INCOMPAT_64BIT)) {
        ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
        goto failed_mount_wq;
    }

    if (!set_journal_csum_feature_set(sb)) {
        ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
             "feature set");
        goto failed_mount_wq;
    }

    /* We have now updated the journal if required, so we can
     * validate the data journaling mode. */
    switch (test_opt(sb, DATA_FLAGS)) {
    case 0:
        /* No mode set, assume a default based on the journal
         * capabilities: ORDERED_DATA if the journal can
         * cope, else JOURNAL_DATA
         */
        if (jbd2_journal_check_available_features
            (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
            set_opt(sb, ORDERED_DATA);
            sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
        } else {
            set_opt(sb, JOURNAL_DATA);
            sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
        }
        break;

    case EXT4_MOUNT_ORDERED_DATA:
    case EXT4_MOUNT_WRITEBACK_DATA:
        if (!jbd2_journal_check_available_features
            (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
            ext4_msg(sb, KERN_ERR, "Journal does not support "
                 "requested data journaling mode");
            goto failed_mount_wq;
        }
    default:
        break;
    }
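    /*
     * Editor's note (not in the upstream source): ordered and
     * writeback modes rely on jbd2 revoke records to keep stale
     * metadata from being replayed over blocks later reused for data,
     * which is why a journal without the REVOKE feature defaults to
     * data=journal above and rejects the other two modes here (note
     * the deliberate fallthrough into the default case).
     */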
    if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
        test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
        ext4_msg(sb, KERN_ERR, "can't mount with "
             "journal_async_commit in data=ordered mode");
        goto failed_mount_wq;
    }

    set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

    sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
    if (!test_opt(sb, NO_MBCACHE)) {
        sbi->s_ea_block_cache = ext4_xattr_create_cache();
        if (!sbi->s_ea_block_cache) {
            ext4_msg(sb, KERN_ERR,
                 "Failed to create ea_block_cache");
            goto failed_mount_wq;
        }

        if (ext4_has_feature_ea_inode(sb)) {
            sbi->s_ea_inode_cache = ext4_xattr_create_cache();
            if (!sbi->s_ea_inode_cache) {
                ext4_msg(sb, KERN_ERR,
                     "Failed to create ea_inode_cache");
                goto failed_mount_wq;
            }
        }
    }
    if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
        (blocksize != PAGE_SIZE)) {
        ext4_msg(sb, KERN_ERR,
             "Unsupported blocksize for fs encryption");
        goto failed_mount_wq;
    }

    if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
        !ext4_has_feature_encrypt(sb)) {
        ext4_set_feature_encrypt(sb);
        ext4_commit_super(sb, 1);
    }

    /*
     * Get the # of file system overhead blocks from the
     * superblock if present.
     */
    if (es->s_overhead_clusters)
        sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
    else {
        err = ext4_calculate_overhead(sb);
        if (err)
            goto failed_mount_wq;
    }

    /*
     * The maximum number of concurrent works can be high and
     * concurrency isn't really necessary.  Limit it to 1.
     */
    EXT4_SB(sb)->rsv_conversion_wq =
        alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
    if (!EXT4_SB(sb)->rsv_conversion_wq) {
        printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
        ret = -ENOMEM;
        goto failed_mount4;
    }
    /*
     * The jbd2_journal_load will have done any necessary log recovery,
     * so we can safely mount the rest of the filesystem now.
     */

    root = ext4_iget(sb, EXT4_ROOT_INO);
    if (IS_ERR(root)) {
        ext4_msg(sb, KERN_ERR, "get root inode failed");
        ret = PTR_ERR(root);
        root = NULL;
        goto failed_mount4;
    }
    if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
        ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
        iput(root);
        goto failed_mount4;
    }
    sb->s_root = d_make_root(root);
    if (!sb->s_root) {
        ext4_msg(sb, KERN_ERR, "get root dentry failed");
        ret = -ENOMEM;
        goto failed_mount4;
    }

    ret = ext4_setup_super(sb, es, sb_rdonly(sb));
    if (ret == -EROFS) {
        sb->s_flags |= SB_RDONLY;
        ret = 0;
    } else if (ret)
        goto failed_mount4a;

    /* determine the minimum size of new large inodes, if present */
    if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
        sbi->s_want_extra_isize == 0) {
        sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
                          EXT4_GOOD_OLD_INODE_SIZE;
        if (ext4_has_feature_extra_isize(sb)) {
            if (sbi->s_want_extra_isize <
                le16_to_cpu(es->s_want_extra_isize))
                sbi->s_want_extra_isize =
                    le16_to_cpu(es->s_want_extra_isize);
            if (sbi->s_want_extra_isize <
                le16_to_cpu(es->s_min_extra_isize))
                sbi->s_want_extra_isize =
                    le16_to_cpu(es->s_min_extra_isize);
        }
    }
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO,
			 "required extra inode space not available");
	}
	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "unable to initialize "
				 "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}
/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}
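/*
 * Hedged example of where the parameters above come from: they are set
 * by mount options such as "commit=", "min_batch_time=" and
 * "max_batch_time=", so a mount like
 *
 *	mount -o commit=15,barrier=1 /dev/sda1 /mnt
 *
 * would reach this function with s_commit_interval holding 15 seconds
 * (scaled to jiffies) and JBD2_BARRIER set on the journal.
 */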
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}
static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			 "blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
			 "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
				       "user (unsupported) - %d",
			 be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}
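/*
 * For reference, a hedged sketch of how a filesystem ends up in the
 * function above: an external journal device is created and attached
 * from userspace roughly like
 *
 *	mke2fs -O journal_dev /dev/sdb1
 *	tune2fs -J device=/dev/sdb1 /dev/sda1
 *
 * after which mounting /dev/sda1 resolves s_journal_dev and takes the
 * ext4_get_dev_journal() path (exact tool invocations may vary).
 */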
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			 "numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
				 "required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					 "unavailable, cannot proceed "
					 "(try mounting with noload)");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
				 "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
			 "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}
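/*
 * A hedged usage note on the -EROFS path above: when the block device
 * itself is read-only and recovery is needed, the error message
 * suggests "noload".  In practice that looks something like
 *
 *	mount -o ro,noload /dev/sda1 /mnt
 *
 * which skips journal replay entirely, at the cost of possibly seeing
 * a stale (pre-crash) view of the filesystem.
 */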
static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;

	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
			 "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (buffer_write_io_error(sbh)) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
				 "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}
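/*
 * Hedged summary of the two modes above: ext4_commit_super(sb, 0) only
 * marks the superblock buffer dirty and lets normal writeback pick it
 * up, while ext4_commit_super(sb, 1) writes it out synchronously (with
 * REQ_FUA when barriers are enabled), so callers such as recovery
 * completion or freeze know the on-disk state is current before they
 * proceed.
 */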
/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}
/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}
/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so a barrier
	 * must be sent at the end of the function.  But we can skip it if
	 * the transaction commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}
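/*
 * Hedged illustration of the barrier decision above: for a journal-less
 * filesystem mounted with barriers, a waiting sync ends in a plain
 * device cache flush; with a journal, the explicit flush is skipped
 * whenever the pending transaction commit will already send one on our
 * behalf.
 */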
/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem to a clean
 * state.  It relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}
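/*
 * For example (hedged, userspace side): a snapshot workflow that hits
 * the freeze/unfreeze pair typically looks like
 *
 *	fsfreeze -f /mnt          # -> ext4_freeze()
 *	lvcreate -s -n snap ...   # take the snapshot
 *	fsfreeze -u /mnt          # -> ext4_unfreeze()
 *
 * LVM issues the equivalent block-device freeze itself, so the explicit
 * fsfreeze calls are usually unnecessary there.
 */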
/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}
/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			 "dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & SB_LAZYTIME)
		sb->s_flags |= SB_LAZYTIME;

	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
		"ext4_remount: Checksum for group %u failed (%u!=%u)",
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
						 le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
					 "remount RDWR because of unprocessed "
					 "orphan inode list.  Please "
					 "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);

			err = ext4_setup_super(sb, es, 0);
			if (err)
				goto restore_opts;

			sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
		err = ext4_commit_super(sb, 1);
		if (err)
			goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}
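/*
 * Hedged examples of remount requests that exercise the paths above:
 *
 *	mount -o remount,ro /mnt    # sync, suspend quota, mark fs valid
 *	mount -o remount,rw /mnt    # verify features/checksums, resume quota
 *
 * Either direction restores the saved options via restore_opts if any
 * step fails, so a failed remount leaves the old configuration intact.
 */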
#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
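/*
 * Worked example with made-up numbers (assuming the in-memory block
 * limits are byte-granular, which is what the shifts above imply): on a
 * 4KiB-block filesystem, a project block soft limit equivalent to 1 GiB
 * is 262144 blocks.  If the project has 25600 blocks (100 MiB) in use,
 * statfs() on a PROJINHERIT directory reports f_blocks = 262144 and
 * f_bfree = f_bavail = 236544, i.e. the quota, not the whole device,
 * bounds the numbers.
 */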
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}
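/*
 * Hedged note on f_bfree vs f_bavail above: f_bfree counts all free
 * blocks, while f_bavail additionally subtracts the root-reserved
 * blocks (tune2fs -m, 5% by default) and the internal reserved
 * clusters, which is why df(1) shows less available space than the
 * raw free count.
 */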
#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}
static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}
/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				 "Quota file not on filesystem root. "
				 "Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}
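/*
 * Hedged userspace example: with journaled quota configured via mount
 * options, e.g.
 *
 *	mount -o usrjquota=aquota.user,jqfmt=vfsv1 /dev/sda1 /mnt
 *	quotaon /mnt
 *
 * quotaon(8) reaches this function through the quotactl(2) Q_QUOTAON
 * path, and the quota file is then marked NOATIME and IMMUTABLE as
 * above so userspace cannot tamper with it.
 */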
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	if (err)
		/* Reset the lockdep class before dropping our reference */
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}
/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				/*
				 * Log before the rollback loop below
				 * clobbers "type".
				 */
				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				return err;
			}
		}
	}
	return 0;
}
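/*
 * Hedged example of getting here: the usage-tracking path requires the
 * "quota" feature flag, which userspace sets with something like
 *
 *	tune2fs -O quota /dev/sda1
 *
 * after which every mount (and every ro->rw remount) re-enables usage
 * tracking from the hidden quota inodes, with no quotacheck(8) run
 * needed.
 */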
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account for only one data block in the transaction
	 * credits, a write must not cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops *ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
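/*
 * Hedged note on the ext2/ext3 shims above: when CONFIG_EXT4_USE_FOR_EXT2
 * is set and the real ext2 driver is not built, ext4 also registers the
 * "ext2" and "ext3" filesystem types, so e.g.
 *
 *	mount -t ext3 /dev/sda1 /mnt
 *
 * is serviced by this driver, with the *_feature_set_ok() checks
 * refusing feature combinations a genuine ext2/ext3 could not handle.
 */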
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)