// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"
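
/*
 * Defining CREATE_TRACE_POINTS before including the trace header
 * instantiates the ext4 tracepoints in this translation unit; every
 * other file sees only the declarations.
 */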
#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif

static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
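
/*
 * The superblock checksum covers the on-disk superblock up to (but not
 * including) the s_checksum field itself, using the crc32c driver that
 * was set up at mount time.
 */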
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}
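
/*
 * Try a physically contiguous kmalloc() first and quietly fall back to
 * vmalloc() if that fails; large allocations (the group descriptor
 * array, for instance) may not find enough contiguous pages.
 */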
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}
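
/*
 * Accessors for the block group descriptor fields.  With the 64bit
 * feature the descriptor grows to at least EXT4_MIN_DESC_SIZE_64BIT and
 * each field is split into _lo/_hi halves; the _hi half is only read
 * (or written) when the descriptor is large enough to contain it.
 */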
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
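
/*
 * Superblock timestamps are stored as a 32-bit low word plus an 8-bit
 * high byte, giving a 40-bit seconds count; clamp to that range before
 * storing so an out-of-range time64_t cannot wrap.
 */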
static void __ext4_update_tstamp(__le32 *lo, __u8 *hi)
{
	time64_t now = ktime_get_real_seconds();

	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}

#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
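
/*
 * Record the first/last error details in the on-disk superblock and arm
 * the daily error-report timer.  The caller is responsible for actually
 * writing the superblock out (see save_error_info() below).
 */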
static void __save_error_info(struct super_block *sb, const char *func,
			      unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	ext4_update_tstamp(es, s_last_error_time);
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		es->s_first_error_time_hi = es->s_last_error_time_hi;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}
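
/*
 * Run the callbacks queued on the committing transaction's private
 * list.  s_md_lock is dropped around each jce_func() call, so the
 * callbacks themselves are free to sleep or take other locks.
 */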
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry *jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */
static void ext4_handle_error(struct super_block *sb)
{
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (sb_rdonly(sb))
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
		      sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */
void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */
void __ext4_abort(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if (sb_rdonly(sb) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}
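
/*
 * Print a ratelimited informational message.  Unlike the error helpers
 * above, this does not record anything in the on-disk superblock or
 * change the mount state.
 */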
void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}
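
/*
 * Called with the block group's bitlock held, which is why the
 * __releases/__acquires annotations below are needed: in the
 * !ERRORS_CONT case the group lock must be dropped and retaken around
 * the synchronous superblock commit.
 */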
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_commit_super(sb, 1);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}
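
/*
 * Mark a group's block and/or inode bitmap as corrupt and, the first
 * time this happens for the group, subtract its free counts from the
 * global percpu counters so that free-space accounting stops
 * advertising blocks and inodes we can no longer trust.
 */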
void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				      ext4_group_t group,
				      unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
		 __bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;

	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which hold s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif
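
/*
 * ext4_put_super() tears the mount down in roughly the reverse order of
 * mount: stop background work, destroy the journal, release the
 * allocator and extent caches, write out the final superblock state,
 * and only then free the in-memory structures.
 */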
  815. static void ext4_put_super(struct super_block *sb)
  816. {
  817. struct ext4_sb_info *sbi = EXT4_SB(sb);
  818. struct ext4_super_block *es = sbi->s_es;
  819. int aborted = 0;
  820. int i, err;
  821. ext4_unregister_li_request(sb);
  822. ext4_quota_off_umount(sb);
  823. destroy_workqueue(sbi->rsv_conversion_wq);
  824. if (sbi->s_journal) {
  825. aborted = is_journal_aborted(sbi->s_journal);
  826. err = jbd2_journal_destroy(sbi->s_journal);
  827. sbi->s_journal = NULL;
  828. if ((err < 0) && !aborted)
  829. ext4_abort(sb, "Couldn't clean up the journal");
  830. }
  831. ext4_unregister_sysfs(sb);
  832. ext4_es_unregister_shrinker(sbi);
  833. del_timer_sync(&sbi->s_err_report);
  834. ext4_release_system_zone(sb);
  835. ext4_mb_release(sb);
  836. ext4_ext_release(sb);
  837. if (!sb_rdonly(sb) && !aborted) {
  838. ext4_clear_feature_journal_needs_recovery(sb);
  839. es->s_state = cpu_to_le16(sbi->s_mount_state);
  840. }
  841. if (!sb_rdonly(sb))
  842. ext4_commit_super(sb, 1);
  843. for (i = 0; i < sbi->s_gdb_count; i++)
  844. brelse(sbi->s_group_desc[i]);
  845. kvfree(sbi->s_group_desc);
  846. kvfree(sbi->s_flex_groups);
  847. percpu_counter_destroy(&sbi->s_freeclusters_counter);
  848. percpu_counter_destroy(&sbi->s_freeinodes_counter);
  849. percpu_counter_destroy(&sbi->s_dirs_counter);
  850. percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
  851. percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
  852. #ifdef CONFIG_QUOTA
  853. for (i = 0; i < EXT4_MAXQUOTAS; i++)
  854. kfree(get_qf_name(sb, sbi, i));
  855. #endif
  856. /* Debugging code just in case the in-memory inode orphan list
  857. * isn't empty. The on-disk one can be non-empty if we've
  858. * detected an error and taken the fs readonly, but the
  859. * in-memory list had better be clean by this point. */
  860. if (!list_empty(&sbi->s_orphan))
  861. dump_orphan_list(sb, sbi);
  862. J_ASSERT(list_empty(&sbi->s_orphan));
  863. sync_blockdev(sb->s_bdev);
  864. invalidate_bdev(sb->s_bdev);
  865. if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
  866. /*
  867. * Invalidate the journal device's buffers. We don't want them
  868. * floating about in memory - the physical journal device may
  869. * hotswapped, and it breaks the `ro-after' testing code.
  870. */
  871. sync_blockdev(sbi->journal_bdev);
  872. invalidate_bdev(sbi->journal_bdev);
  873. ext4_blkdev_remove(sbi);
  874. }
        if (sbi->s_ea_inode_cache) {
                ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
                sbi->s_ea_inode_cache = NULL;
        }
        if (sbi->s_ea_block_cache) {
                ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
                sbi->s_ea_block_cache = NULL;
        }
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
        brelse(sbi->s_sbh);
        sb->s_fs_info = NULL;
        /*
         * Now that we are completely done shutting down the
         * superblock, we need to actually destroy the kobject.
         */
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi->s_blockgroup_lock);
        fs_put_dax(sbi->s_daxdev);
        kfree(sbi);
}
static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
        struct ext4_inode_info *ei;

        ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;

        inode_set_iversion(&ei->vfs_inode, 1);
        spin_lock_init(&ei->i_raw_lock);
        INIT_LIST_HEAD(&ei->i_prealloc_list);
        spin_lock_init(&ei->i_prealloc_lock);
        ext4_es_init_tree(&ei->i_es_tree);
        rwlock_init(&ei->i_es_lock);
        INIT_LIST_HEAD(&ei->i_es_list);
        ei->i_es_all_nr = 0;
        ei->i_es_shk_nr = 0;
        ei->i_es_shrink_lblk = 0;
        ei->i_reserved_data_blocks = 0;
        ei->i_da_metadata_calc_len = 0;
        ei->i_da_metadata_calc_last_lblock = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
        ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
        memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
        ei->jinode = NULL;
        INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
        spin_lock_init(&ei->i_completed_io_lock);
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_unwritten, 0);
        INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
        return &ei->vfs_inode;
}
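
/*
 * Piggyback on generic_drop_inode()'s decision; the tracepoint lets the
 * drop-vs-keep choice be observed at runtime.
 */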
static int ext4_drop_inode(struct inode *inode)
{
        int drop = generic_drop_inode(inode);

        trace_ext4_drop_inode(inode, drop);
        return drop;
}
static void ext4_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
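
/*
 * An inode must be off the orphan list by the time it is destroyed; if
 * it is not, dump enough state to debug the leak before freeing the
 * inode via RCU.
 */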
static void ext4_destroy_inode(struct inode *inode)
{
        if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
                ext4_msg(inode->i_sb, KERN_ERR,
                         "Inode %lu (%p): orphan list check failed!",
                         inode->i_ino, EXT4_I(inode));
                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
                               EXT4_I(inode), sizeof(struct ext4_inode_info),
                               true);
                dump_stack();
        }
        call_rcu(&inode->i_rcu, ext4_i_callback);
}
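
/*
 * Slab constructor: called once when a new ext4_inode_info object is
 * first created by the slab cache, not on every ext4_alloc_inode(), so
 * only state that must stay valid across free/re-alloc cycles belongs
 * here.
 */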
static void init_once(void *foo)
{
        struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

        INIT_LIST_HEAD(&ei->i_orphan);
        init_rwsem(&ei->xattr_sem);
        init_rwsem(&ei->i_data_sem);
        init_rwsem(&ei->i_mmap_sem);
        inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
        ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
                                sizeof(struct ext4_inode_info), 0,
                                (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
                                        SLAB_ACCOUNT),
                                offsetof(struct ext4_inode_info, i_data),
                                sizeof_field(struct ext4_inode_info, i_data),
                                init_once);
        if (ext4_inode_cachep == NULL)
                return -ENOMEM;
        return 0;
}

static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(ext4_inode_cachep);
}
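
/*
 * Final per-inode teardown on eviction: drop buffers, quota references,
 * preallocations, the extent status tree, the jbd2 inode, and the
 * fscrypt key reference.
 */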
void ext4_clear_inode(struct inode *inode)
{
        invalidate_inode_buffers(inode);
        clear_inode(inode);
        dquot_drop(inode);
        ext4_discard_preallocations(inode);
        ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
        if (EXT4_I(inode)->jinode) {
                jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
                                               EXT4_I(inode)->jinode);
                jbd2_free_inode(EXT4_I(inode)->jinode);
                EXT4_I(inode)->jinode = NULL;
        }
        fscrypt_put_encryption_info(inode);
}
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
                                        u64 ino, u32 generation)
{
        struct inode *inode;

        if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
                return ERR_PTR(-ESTALE);
        if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
                return ERR_PTR(-ESTALE);

        /* iget isn't really right if the inode is currently unallocated!!
         *
         * ext4_read_inode will return a bad_inode if the inode had been
         * deleted, so we should be safe.
         *
         * Currently we don't know the generation for parent directory, so
         * a generation of 0 means "accept any"
         */
        inode = ext4_iget_normal(sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (generation && inode->i_generation != generation) {
                iput(inode);
                return ERR_PTR(-ESTALE);
        }

        return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
                                 gfp_t wait)
{
        journal_t *journal = EXT4_SB(sb)->s_journal;

        WARN_ON(PageChecked(page));
        if (!page_has_buffers(page))
                return 0;
        if (journal)
                return jbd2_journal_try_to_free_buffers(journal, page,
                                        wait & ~__GFP_DIRECT_RECLAIM);
        return try_to_free_buffers(page);
}
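
/*
 * fscrypt callbacks: an inode's encryption context (policy) lives in an
 * "encryption" xattr, so get_context/set_context are thin wrappers
 * around the ext4 xattr code.
 */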
#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
        return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
                              EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
                            void *fs_data)
{
        handle_t *handle = fs_data;
        int res, res2, credits, retries = 0;

        /*
         * Encrypting the root directory is not allowed because e2fsck expects
         * lost+found to exist and be unencrypted, and encrypting the root
         * directory would imply encrypting the lost+found directory as well
         * as the filename "lost+found" itself.
         */
        if (inode->i_ino == EXT4_ROOT_INO)
                return -EPERM;

        if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
                return -EINVAL;

        res = ext4_convert_inline_data(inode);
        if (res)
                return res;

        /*
         * If a journal handle was specified, then the encryption context is
         * being set on a new inode via inheritance and is part of a larger
         * transaction to create the inode.  Otherwise the encryption context
         * is being set on an existing inode in its own transaction.  Only in
         * the latter case should the "retry on ENOSPC" logic be used.
         */
        if (handle) {
                res = ext4_xattr_set_handle(handle, inode,
                                            EXT4_XATTR_INDEX_ENCRYPTION,
                                            EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
                                            ctx, len, 0);
                if (!res) {
                        ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
                        ext4_clear_inode_state(inode,
                                        EXT4_STATE_MAY_INLINE_DATA);
                        /*
                         * Update inode->i_flags - S_ENCRYPTED will be enabled,
                         * S_DAX may be disabled
                         */
                        ext4_set_inode_flags(inode);
                }
                return res;
        }

        res = dquot_initialize(inode);
        if (res)
                return res;
retry:
        res = ext4_xattr_set_credits(inode, len, false /* is_create */,
                                     &credits);
        if (res)
                return res;

        handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
                                    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
                                    ctx, len, 0);
        if (!res) {
                ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
                /*
                 * Update inode->i_flags - S_ENCRYPTED will be enabled,
                 * S_DAX may be disabled
                 */
                ext4_set_inode_flags(inode);
                res = ext4_mark_inode_dirty(handle, inode);
                if (res)
                        EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
        }
        res2 = ext4_journal_stop(handle);

        if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
        if (!res)
                res = res2;
        return res;
}

static bool ext4_dummy_context(struct inode *inode)
{
        return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static const struct fscrypt_operations ext4_cryptops = {
        .key_prefix     = "ext4:",
        .get_context    = ext4_get_context,
        .set_context    = ext4_set_context,
        .dummy_context  = ext4_dummy_context,
        .empty_dir      = ext4_empty_dir,
        .max_namelen    = EXT4_NAME_LEN,
};
#endif
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
                         const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
                               size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
                                const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
                             unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
        return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
        .get_reserved_space     = ext4_get_reserved_space,
        .write_dquot            = ext4_write_dquot,
        .acquire_dquot          = ext4_acquire_dquot,
        .release_dquot          = ext4_release_dquot,
        .mark_dirty             = ext4_mark_dquot_dirty,
        .write_info             = ext4_write_info,
        .alloc_dquot            = dquot_alloc,
        .destroy_dquot          = dquot_destroy,
        .get_projid             = ext4_get_projid,
        .get_inode_usage        = ext4_get_inode_usage,
        .get_next_id            = ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
        .quota_on       = ext4_quota_on,
        .quota_off      = ext4_quota_off,
        .quota_sync     = dquot_quota_sync,
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
        .set_dqblk      = dquot_set_dqblk,
        .get_nextdqblk  = dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
        .alloc_inode    = ext4_alloc_inode,
        .destroy_inode  = ext4_destroy_inode,
        .write_inode    = ext4_write_inode,
        .dirty_inode    = ext4_dirty_inode,
        .drop_inode     = ext4_drop_inode,
        .evict_inode    = ext4_evict_inode,
        .put_super      = ext4_put_super,
        .sync_fs        = ext4_sync_fs,
        .freeze_fs      = ext4_freeze,
        .unfreeze_fs    = ext4_unfreeze,
        .statfs         = ext4_statfs,
        .remount_fs     = ext4_remount,
        .show_options   = ext4_show_options,
#ifdef CONFIG_QUOTA
        .quota_read     = ext4_quota_read,
        .quota_write    = ext4_quota_write,
        .get_dquots     = ext4_get_dquots,
#endif
        .bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
        .fh_to_dentry = ext4_fh_to_dentry,
        .fh_to_parent = ext4_fh_to_parent,
        .get_parent = ext4_get_parent,
};
enum {
        Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
        Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
        Opt_err_ro, Opt_nouid32, Opt_debug, Opt_removed,
        Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
        Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
        Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
        Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
        Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
        Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
        Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
        Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
        Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
        Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
        Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
        Opt_nowarn_on_error, Opt_mblk_io_submit,
        Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
        Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
        Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
};
static const match_table_t tokens = {
        {Opt_bsd_df, "bsddf"},
        {Opt_minix_df, "minixdf"},
        {Opt_grpid, "grpid"},
        {Opt_grpid, "bsdgroups"},
        {Opt_nogrpid, "nogrpid"},
        {Opt_nogrpid, "sysvgroups"},
        {Opt_resgid, "resgid=%u"},
        {Opt_resuid, "resuid=%u"},
        {Opt_sb, "sb=%u"},
        {Opt_err_cont, "errors=continue"},
        {Opt_err_panic, "errors=panic"},
        {Opt_err_ro, "errors=remount-ro"},
        {Opt_nouid32, "nouid32"},
        {Opt_debug, "debug"},
        {Opt_removed, "oldalloc"},
        {Opt_removed, "orlov"},
        {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_noload, "norecovery"},
        {Opt_noload, "noload"},
        {Opt_removed, "nobh"},
        {Opt_removed, "bh"},
        {Opt_commit, "commit=%u"},
        {Opt_min_batch_time, "min_batch_time=%u"},
        {Opt_max_batch_time, "max_batch_time=%u"},
        {Opt_journal_dev, "journal_dev=%u"},
        {Opt_journal_path, "journal_path=%s"},
        {Opt_journal_checksum, "journal_checksum"},
        {Opt_nojournal_checksum, "nojournal_checksum"},
        {Opt_journal_async_commit, "journal_async_commit"},
        {Opt_abort, "abort"},
        {Opt_data_journal, "data=journal"},
        {Opt_data_ordered, "data=ordered"},
        {Opt_data_writeback, "data=writeback"},
        {Opt_data_err_abort, "data_err=abort"},
        {Opt_data_err_ignore, "data_err=ignore"},
        {Opt_offusrjquota, "usrjquota="},
        {Opt_usrjquota, "usrjquota=%s"},
        {Opt_offgrpjquota, "grpjquota="},
        {Opt_grpjquota, "grpjquota=%s"},
        {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
        {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
        {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
        {Opt_grpquota, "grpquota"},
        {Opt_noquota, "noquota"},
        {Opt_quota, "quota"},
        {Opt_usrquota, "usrquota"},
        {Opt_prjquota, "prjquota"},
        {Opt_barrier, "barrier=%u"},
        {Opt_barrier, "barrier"},
        {Opt_nobarrier, "nobarrier"},
        {Opt_i_version, "i_version"},
        {Opt_dax, "dax"},
        {Opt_stripe, "stripe=%u"},
        {Opt_delalloc, "delalloc"},
        {Opt_warn_on_error, "warn_on_error"},
        {Opt_nowarn_on_error, "nowarn_on_error"},
        {Opt_lazytime, "lazytime"},
        {Opt_nolazytime, "nolazytime"},
        {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
        {Opt_nodelalloc, "nodelalloc"},
        {Opt_removed, "mblk_io_submit"},
        {Opt_removed, "nomblk_io_submit"},
        {Opt_block_validity, "block_validity"},
        {Opt_noblock_validity, "noblock_validity"},
        {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
        {Opt_journal_ioprio, "journal_ioprio=%u"},
        {Opt_auto_da_alloc, "auto_da_alloc=%u"},
        {Opt_auto_da_alloc, "auto_da_alloc"},
        {Opt_noauto_da_alloc, "noauto_da_alloc"},
        {Opt_dioread_nolock, "dioread_nolock"},
        {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_init_itable, "init_itable=%u"},
        {Opt_init_itable, "init_itable"},
        {Opt_noinit_itable, "noinit_itable"},
        {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
        {Opt_test_dummy_encryption, "test_dummy_encryption"},
        {Opt_nombcache, "nombcache"},
        {Opt_nombcache, "no_mbcache"},  /* for backward compatibility */
        {Opt_removed, "check=none"},    /* mount option from ext2/3 */
        {Opt_removed, "nocheck"},       /* mount option from ext2/3 */
        {Opt_removed, "reservation"},   /* mount option from ext2/3 */
        {Opt_removed, "noreservation"}, /* mount option from ext2/3 */
        {Opt_removed, "journal=%u"},    /* mount option from ext2/3 */
        {Opt_err, NULL},
};
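
/*
 * Parse a leading "sb=N" mount option to locate an alternate (backup)
 * superblock, e.g. "sb=8193" on a 1k-block filesystem; the option is
 * consumed from *data and the block number returned, or 1 (the default
 * location) if no valid "sb=" option is present.
 */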
static ext4_fsblk_t get_sb_block(void **data)
{
        ext4_fsblk_t sb_block;
        char *options = (char *) *data;

        if (!options || strncmp(options, "sb=", 3) != 0)
                return 1;       /* Default location */

        options += 3;
        /* TODO: use simple_strtoll with >32bit ext4 */
        sb_block = simple_strtoul(options, &options, 0);
        if (*options && *options != ',') {
                printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
                       (char *) *data);
                return 1;
        }
        if (*options == ',')
                options++;
        *data = (void *) options;

        return sb_block;
}
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

static const char deprecated_msg[] =
        "Mount option \"%s\" will be removed by %s\n"
        "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
        int ret = -1;

        if (sb_any_quota_loaded(sb) && !old_qname) {
                ext4_msg(sb, KERN_ERR,
                         "Cannot change journaled "
                         "quota options when quota turned on");
                return -1;
        }
        if (ext4_has_feature_quota(sb)) {
                ext4_msg(sb, KERN_INFO, "Journaled quota options "
                         "ignored when QUOTA feature is enabled");
                return 1;
        }
        qname = match_strdup(args);
        if (!qname) {
                ext4_msg(sb, KERN_ERR,
                         "Not enough memory for storing quotafile name");
                return -1;
        }
        if (old_qname) {
                if (strcmp(old_qname, qname) == 0)
                        ret = 1;
                else
                        ext4_msg(sb, KERN_ERR,
                                 "%s quota file already specified",
                                 QTYPE2NAME(qtype));
                goto errout;
        }
        if (strchr(qname, '/')) {
                ext4_msg(sb, KERN_ERR,
                         "quotafile must be on filesystem root");
                goto errout;
        }
        rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
        set_opt(sb, QUOTA);
        return 1;
errout:
        kfree(qname);
        return ret;
}
static int clear_qf_name(struct super_block *sb, int qtype)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        char *old_qname = get_qf_name(sb, sbi, qtype);

        if (sb_any_quota_loaded(sb) && old_qname) {
                ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
                        " when quota turned on");
                return -1;
        }
        rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
        synchronize_rcu();
        kfree(old_qname);
        return 1;
}
#endif
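
/*
 * Flags describing how entries in ext4_mount_opts below are handled:
 * MOPT_SET/MOPT_CLEAR set or clear mount_opt bits (e.g. "nodiscard"
 * pairs MOPT_CLEAR with EXT4_MOUNT_DISCARD), MOPT_STRING/MOPT_GTE0
 * constrain the option's argument, and MOPT_NO_EXT2/MOPT_NO_EXT3
 * reject options that are invalid when mounted as ext2 or ext3.
 */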
#define MOPT_SET        0x0001
#define MOPT_CLEAR      0x0002
#define MOPT_NOSUPPORT  0x0004
#define MOPT_EXPLICIT   0x0008
#define MOPT_CLEAR_ERR  0x0010
#define MOPT_GTE0       0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q          0
#define MOPT_QFMT       0x0040
#else
#define MOPT_Q          MOPT_NOSUPPORT
#define MOPT_QFMT       MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ      0x0080
#define MOPT_NO_EXT2    0x0100
#define MOPT_NO_EXT3    0x0200
#define MOPT_EXT4_ONLY  (MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING     0x0400
static const struct mount_opts {
        int     token;
        int     mount_opt;
        int     flags;
} ext4_mount_opts[] = {
        {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
        {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
        {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
        {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
        {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
        {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
        {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
         MOPT_EXT4_ONLY | MOPT_SET},
        {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
        {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
        {Opt_delalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
        {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
        {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
                                    EXT4_MOUNT_JOURNAL_CHECKSUM),
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
        {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
        {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
        {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
        {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
         MOPT_NO_EXT2},
        {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
         MOPT_NO_EXT2},
        {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
        {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
        {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
        {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
        {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
        {Opt_commit, 0, MOPT_GTE0},
        {Opt_max_batch_time, 0, MOPT_GTE0},
        {Opt_min_batch_time, 0, MOPT_GTE0},
        {Opt_inode_readahead_blks, 0, MOPT_GTE0},
        {Opt_init_itable, 0, MOPT_GTE0},
        {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
        {Opt_stripe, 0, MOPT_GTE0},
        {Opt_resuid, 0, MOPT_GTE0},
        {Opt_resgid, 0, MOPT_GTE0},
        {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
        {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
        {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
        {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
        {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
        {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
         MOPT_NO_EXT2 | MOPT_DATAJ},
        {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
        {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
        {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
        {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
        {Opt_acl, 0, MOPT_NOSUPPORT},
        {Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
        {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
        {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
        {Opt_debug_want_extra_isize, 0, MOPT_GTE0},
        {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
        {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
         MOPT_SET | MOPT_Q},
        {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
         MOPT_SET | MOPT_Q},
        {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
         MOPT_SET | MOPT_Q},
        {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
                       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
         MOPT_CLEAR | MOPT_Q},
        {Opt_usrjquota, 0, MOPT_Q},
        {Opt_grpjquota, 0, MOPT_Q},
        {Opt_offusrjquota, 0, MOPT_Q},
        {Opt_offgrpjquota, 0, MOPT_Q},
        {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
        {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
        {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
        {Opt_max_dir_size_kb, 0, MOPT_GTE0},
        {Opt_test_dummy_encryption, 0, MOPT_GTE0},
        {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
        {Opt_err, 0, 0}
};
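
/*
 * Handle one mount option.  Returns 1 on success and -1 on error;
 * *journal_devnum and *journal_ioprio are filled in for options the
 * caller must act on once parsing is complete.
 */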
static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                            substring_t *args, unsigned long *journal_devnum,
                            unsigned int *journal_ioprio, int is_remount)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        const struct mount_opts *m;
        kuid_t uid;
        kgid_t gid;
        int arg = 0;

#ifdef CONFIG_QUOTA
        if (token == Opt_usrjquota)
                return set_qf_name(sb, USRQUOTA, &args[0]);
        else if (token == Opt_grpjquota)
                return set_qf_name(sb, GRPQUOTA, &args[0]);
        else if (token == Opt_offusrjquota)
                return clear_qf_name(sb, USRQUOTA);
        else if (token == Opt_offgrpjquota)
                return clear_qf_name(sb, GRPQUOTA);
#endif
        switch (token) {
        case Opt_noacl:
        case Opt_nouser_xattr:
                ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
                break;
        case Opt_sb:
                return 1;       /* handled by get_sb_block() */
        case Opt_removed:
                ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
                return 1;
        case Opt_abort:
                sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
                return 1;
        case Opt_i_version:
                sb->s_flags |= SB_I_VERSION;
                return 1;
        case Opt_lazytime:
                sb->s_flags |= SB_LAZYTIME;
                return 1;
        case Opt_nolazytime:
                sb->s_flags &= ~SB_LAZYTIME;
                return 1;
        }

        for (m = ext4_mount_opts; m->token != Opt_err; m++)
                if (token == m->token)
                        break;

        if (m->token == Opt_err) {
                ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
                         "or missing value", opt);
                return -1;
        }

        if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
                ext4_msg(sb, KERN_ERR,
                         "Mount option \"%s\" incompatible with ext2", opt);
                return -1;
        }
        if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
                ext4_msg(sb, KERN_ERR,
                         "Mount option \"%s\" incompatible with ext3", opt);
                return -1;
        }

        if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
                return -1;
        if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
                return -1;
        if (m->flags & MOPT_EXPLICIT) {
                if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
                        set_opt2(sb, EXPLICIT_DELALLOC);
                } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
                        set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
                } else
                        return -1;
        }
        if (m->flags & MOPT_CLEAR_ERR)
                clear_opt(sb, ERRORS_MASK);
        if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
                ext4_msg(sb, KERN_ERR, "Cannot change quota "
                         "options when quota turned on");
                return -1;
        }

        if (m->flags & MOPT_NOSUPPORT) {
                ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
        } else if (token == Opt_commit) {
                if (arg == 0)
                        arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
                sbi->s_commit_interval = HZ * arg;
        } else if (token == Opt_debug_want_extra_isize) {
                sbi->s_want_extra_isize = arg;
        } else if (token == Opt_max_batch_time) {
                sbi->s_max_batch_time = arg;
        } else if (token == Opt_min_batch_time) {
                sbi->s_min_batch_time = arg;
        } else if (token == Opt_inode_readahead_blks) {
                if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
                        ext4_msg(sb, KERN_ERR,
                                 "EXT4-fs: inode_readahead_blks must be "
                                 "0 or a power of 2 smaller than 2^31");
                        return -1;
                }
                sbi->s_inode_readahead_blks = arg;
        } else if (token == Opt_init_itable) {
                set_opt(sb, INIT_INODE_TABLE);
                if (!args->from)
                        arg = EXT4_DEF_LI_WAIT_MULT;
                sbi->s_li_wait_mult = arg;
        } else if (token == Opt_max_dir_size_kb) {
                sbi->s_max_dir_size_kb = arg;
        } else if (token == Opt_stripe) {
                sbi->s_stripe = arg;
        } else if (token == Opt_resuid) {
                uid = make_kuid(current_user_ns(), arg);
                if (!uid_valid(uid)) {
                        ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
                        return -1;
                }
                sbi->s_resuid = uid;
        } else if (token == Opt_resgid) {
                gid = make_kgid(current_user_ns(), arg);
                if (!gid_valid(gid)) {
                        ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
                        return -1;
                }
                sbi->s_resgid = gid;
        } else if (token == Opt_journal_dev) {
                if (is_remount) {
                        ext4_msg(sb, KERN_ERR,
                                 "Cannot specify journal on remount");
                        return -1;
                }
                *journal_devnum = arg;
        } else if (token == Opt_journal_path) {
                char *journal_path;
                struct inode *journal_inode;
                struct path path;
                int error;

                if (is_remount) {
                        ext4_msg(sb, KERN_ERR,
                                 "Cannot specify journal on remount");
                        return -1;
                }
                journal_path = match_strdup(&args[0]);
                if (!journal_path) {
                        ext4_msg(sb, KERN_ERR, "error: could not dup "
                                "journal device string");
                        return -1;
                }

                error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
                if (error) {
                        ext4_msg(sb, KERN_ERR, "error: could not find "
                                "journal device path: error %d", error);
                        kfree(journal_path);
                        return -1;
                }

                journal_inode = d_inode(path.dentry);
                if (!S_ISBLK(journal_inode->i_mode)) {
                        ext4_msg(sb, KERN_ERR, "error: journal path %s "
                                "is not a block device", journal_path);
                        path_put(&path);
                        kfree(journal_path);
                        return -1;
                }

                *journal_devnum = new_encode_dev(journal_inode->i_rdev);
                path_put(&path);
                kfree(journal_path);
        } else if (token == Opt_journal_ioprio) {
                if (arg > 7) {
                        ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
                                 " (must be 0-7)");
                        return -1;
                }
                *journal_ioprio =
                        IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
        } else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
                sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
                ext4_msg(sb, KERN_WARNING,
                         "Test dummy encryption mode enabled");
#else
                ext4_msg(sb, KERN_WARNING,
                         "Test dummy encryption mount option ignored");
#endif
        } else if (m->flags & MOPT_DATAJ) {
                if (is_remount) {
                        if (!sbi->s_journal)
                                ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
                        else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
                                ext4_msg(sb, KERN_ERR,
                                         "Cannot change data mode on remount");
                                return -1;
                        }
                } else {
                        clear_opt(sb, DATA_FLAGS);
                        sbi->s_mount_opt |= m->mount_opt;
                }
#ifdef CONFIG_QUOTA
        } else if (m->flags & MOPT_QFMT) {
                if (sb_any_quota_loaded(sb) &&
                    sbi->s_jquota_fmt != m->mount_opt) {
                        ext4_msg(sb, KERN_ERR, "Cannot change journaled "
                                 "quota options when quota turned on");
                        return -1;
                }
                if (ext4_has_feature_quota(sb)) {
                        ext4_msg(sb, KERN_INFO,
                                 "Quota format mount options ignored "
                                 "when QUOTA feature is enabled");
                        return 1;
                }
                sbi->s_jquota_fmt = m->mount_opt;
#endif
        } else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
                ext4_msg(sb, KERN_WARNING,
                "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
                sbi->s_mount_opt |= m->mount_opt;
#else
                ext4_msg(sb, KERN_INFO, "dax option not supported");
                return -1;
#endif
        } else if (token == Opt_data_err_abort) {
                sbi->s_mount_opt |= m->mount_opt;
        } else if (token == Opt_data_err_ignore) {
                sbi->s_mount_opt &= ~m->mount_opt;
        } else {
                if (!args->from)
                        arg = 1;
                if (m->flags & MOPT_CLEAR)
                        arg = !arg;
                else if (unlikely(!(m->flags & MOPT_SET))) {
                        ext4_msg(sb, KERN_WARNING,
                                 "buggy handling of option %s", opt);
                        WARN_ON(1);
                        return -1;
                }
                if (arg != 0)
                        sbi->s_mount_opt |= m->mount_opt;
                else
                        sbi->s_mount_opt &= ~m->mount_opt;
        }
        return 1;
}
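
/*
 * Split the option string on commas, feed each token through
 * handle_mount_opt(), then validate cross-option constraints (project
 * quota feature, old vs. new quota mixing, dioread_nolock block size).
 * Returns 1 on success, 0 on failure.
 */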
static int parse_options(char *options, struct super_block *sb,
                         unsigned long *journal_devnum,
                         unsigned int *journal_ioprio,
                         int is_remount)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
        substring_t args[MAX_OPT_ARGS];
        int token;

        if (!options)
                return 1;

        while ((p = strsep(&options, ",")) != NULL) {
                if (!*p)
                        continue;
                /*
                 * Initialize args struct so we know whether arg was
                 * found; some options take optional arguments.
                 */
                args[0].to = args[0].from = NULL;
                token = match_token(p, tokens, args);
                if (handle_mount_opt(sb, p, token, args, journal_devnum,
                                     journal_ioprio, is_remount) < 0)
                        return 0;
        }
#ifdef CONFIG_QUOTA
        /*
         * We do the test below only for project quotas. 'usrquota' and
         * 'grpquota' mount options are allowed even without quota feature
         * to support legacy quotas in quota files.
         */
        if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
                ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
                         "Cannot enable project quota enforcement.");
                return 0;
        }
        usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
        grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
        if (usr_qf_name || grp_qf_name) {
                if (test_opt(sb, USRQUOTA) && usr_qf_name)
                        clear_opt(sb, USRQUOTA);

                if (test_opt(sb, GRPQUOTA) && grp_qf_name)
                        clear_opt(sb, GRPQUOTA);

                if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
                        ext4_msg(sb, KERN_ERR, "old and new quota "
                                        "format mixing");
                        return 0;
                }

                if (!sbi->s_jquota_fmt) {
                        ext4_msg(sb, KERN_ERR, "journaled quota format "
                                        "not specified");
                        return 0;
                }
        }
#endif
        if (test_opt(sb, DIOREAD_NOLOCK)) {
                int blocksize =
                        BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

                if (blocksize < PAGE_SIZE) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "dioread_nolock if block size != PAGE_SIZE");
                        return 0;
                }
        }
        return 1;
}
static inline void ext4_show_quota_options(struct seq_file *seq,
                                           struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        char *usr_qf_name, *grp_qf_name;

        if (sbi->s_jquota_fmt) {
                char *fmtname = "";

                switch (sbi->s_jquota_fmt) {
                case QFMT_VFS_OLD:
                        fmtname = "vfsold";
                        break;
                case QFMT_VFS_V0:
                        fmtname = "vfsv0";
                        break;
                case QFMT_VFS_V1:
                        fmtname = "vfsv1";
                        break;
                }
                seq_printf(seq, ",jqfmt=%s", fmtname);
        }

        rcu_read_lock();
        usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
        grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
        if (usr_qf_name)
                seq_show_option(seq, "usrjquota", usr_qf_name);
        if (grp_qf_name)
                seq_show_option(seq, "grpjquota", grp_qf_name);
        rcu_read_unlock();
#endif
}
static const char *token2str(int token)
{
        const struct match_token *t;

        for (t = tokens; t->token != Opt_err; t++)
                if (t->token == token && !strchr(t->pattern, '='))
                        break;
        return t->pattern;
}
/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
                              int nodefs)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        int def_errors, def_mount_opt = sbi->s_def_mount_opt;
        const struct mount_opts *m;
        char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

        if (sbi->s_sb_block != 1)
                SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

        for (m = ext4_mount_opts; m->token != Opt_err; m++) {
                int want_set = m->flags & MOPT_SET;
                if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
                    (m->flags & MOPT_CLEAR_ERR))
                        continue;
                if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
                        continue; /* skip if same as the default */
                if ((want_set &&
                     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
                    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
                        continue; /* select Opt_noFoo vs Opt_Foo */
                SEQ_OPTS_PRINT("%s", token2str(m->token));
        }

        if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
            le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
                SEQ_OPTS_PRINT("resuid=%u",
                                from_kuid_munged(&init_user_ns, sbi->s_resuid));
        if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
            le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
                SEQ_OPTS_PRINT("resgid=%u",
                                from_kgid_munged(&init_user_ns, sbi->s_resgid));
        def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
        if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
                SEQ_OPTS_PUTS("errors=remount-ro");
        if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
                SEQ_OPTS_PUTS("errors=continue");
        if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
                SEQ_OPTS_PUTS("errors=panic");
        if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
                SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
        if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
                SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
        if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
                SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
        if (sb->s_flags & SB_I_VERSION)
                SEQ_OPTS_PUTS("i_version");
        if (nodefs || sbi->s_stripe)
                SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
        if (nodefs || EXT4_MOUNT_DATA_FLAGS &
                        (sbi->s_mount_opt ^ def_mount_opt)) {
                if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
                        SEQ_OPTS_PUTS("data=journal");
                else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
                        SEQ_OPTS_PUTS("data=ordered");
                else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
                        SEQ_OPTS_PUTS("data=writeback");
        }
        if (nodefs ||
            sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
                SEQ_OPTS_PRINT("inode_readahead_blks=%u",
                               sbi->s_inode_readahead_blks);

        if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
                       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
                SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
        if (nodefs || sbi->s_max_dir_size_kb)
                SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
        if (test_opt(sb, DATA_ERR_ABORT))
                SEQ_OPTS_PUTS("data_err=abort");
        if (DUMMY_ENCRYPTION_ENABLED(sbi))
                SEQ_OPTS_PUTS("test_dummy_encryption");

        ext4_show_quota_options(seq, sb);
        return 0;
}
static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
        return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
        struct super_block *sb = seq->private;
        int rc;

        seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
        rc = _ext4_show_options(seq, sb, 1);
        seq_puts(seq, "\n");
        return rc;
}
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
                            int read_only)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int err = 0;

        if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
                ext4_msg(sb, KERN_ERR, "revision level too high, "
                         "forcing read-only mode");
                err = -EROFS;
        }
        if (read_only)
                goto done;
        if (!(sbi->s_mount_state & EXT4_VALID_FS))
                ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
                         "running e2fsck is recommended");
        else if (sbi->s_mount_state & EXT4_ERROR_FS)
                ext4_msg(sb, KERN_WARNING,
                         "warning: mounting fs with errors, "
                         "running e2fsck is recommended");
        else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
                 le16_to_cpu(es->s_mnt_count) >=
                 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
                ext4_msg(sb, KERN_WARNING,
                         "warning: maximal mount count reached, "
                         "running e2fsck is recommended");
        else if (le32_to_cpu(es->s_checkinterval) &&
                 (ext4_get_tstamp(es, s_lastcheck) +
                  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
                ext4_msg(sb, KERN_WARNING,
                         "warning: checktime reached, "
                         "running e2fsck is recommended");
        if (!sbi->s_journal)
                es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
        if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
                es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
        le16_add_cpu(&es->s_mnt_count, 1);
        ext4_update_tstamp(es, s_mtime);
        ext4_update_dynamic_rev(sb);
        if (sbi->s_journal)
                ext4_set_feature_journal_needs_recovery(sb);

        err = ext4_commit_super(sb, 1);
done:
        if (test_opt(sb, DEBUG))
                printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
                                "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
                        sb->s_blocksize,
                        sbi->s_groups_count,
                        EXT4_BLOCKS_PER_GROUP(sb),
                        EXT4_INODES_PER_GROUP(sb),
                        sbi->s_mount_opt, sbi->s_mount_opt2);

        cleancache_init_fs(sb);
        return err;
}
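
/*
 * Grow the in-memory flex_groups array so that it can describe ngroup
 * block groups, rounding the allocation up to a power of two and
 * copying over the old contents when reallocating.
 */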
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct flex_groups *new_groups;
        int size;

        if (!sbi->s_log_groups_per_flex)
                return 0;

        size = ext4_flex_group(sbi, ngroup - 1) + 1;
        if (size <= sbi->s_flex_groups_allocated)
                return 0;

        size = roundup_pow_of_two(size * sizeof(struct flex_groups));
        new_groups = kvzalloc(size, GFP_KERNEL);
        if (!new_groups) {
                ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
                         size / (int) sizeof(struct flex_groups));
                return -ENOMEM;
        }

        if (sbi->s_flex_groups) {
                memcpy(new_groups, sbi->s_flex_groups,
                       (sbi->s_flex_groups_allocated *
                        sizeof(struct flex_groups)));
                kvfree(sbi->s_flex_groups);
        }
        sbi->s_flex_groups = new_groups;
        sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
        return 0;
}
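
/*
 * Seed the per-flex-group free inode/cluster and used-directory
 * counters from the on-disk group descriptors.  Note the return
 * convention: 1 on success, 0 on failure.
 */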
static int ext4_fill_flex_info(struct super_block *sb)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = NULL;
        ext4_group_t flex_group;
        int i, err;

        sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
        if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
                sbi->s_log_groups_per_flex = 0;
                return 1;
        }

        err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
        if (err)
                goto failed;

        for (i = 0; i < sbi->s_groups_count; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);

                flex_group = ext4_flex_group(sbi, i);
                atomic_add(ext4_free_inodes_count(sb, gdp),
                           &sbi->s_flex_groups[flex_group].free_inodes);
                atomic64_add(ext4_free_group_clusters(sb, gdp),
                             &sbi->s_flex_groups[flex_group].free_clusters);
                atomic_add(ext4_used_dirs_count(sb, gdp),
                           &sbi->s_flex_groups[flex_group].used_dirs);
        }

        return 1;
failed:
        return 0;
}
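
/*
 * Group descriptor checksum: crc32c seeded with s_csum_seed and
 * truncated to 16 bits when metadata_csum is enabled, the older crc16
 * scheme when only gdt_csum is set, and 0 when neither feature is on.
 */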
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
                                   struct ext4_group_desc *gdp)
{
        int offset = offsetof(struct ext4_group_desc, bg_checksum);
        __u16 crc = 0;
        __le32 le_group = cpu_to_le32(block_group);
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (ext4_has_metadata_csum(sbi->s_sb)) {
                /* Use new metadata_csum algorithm */
                __u32 csum32;
                __u16 dummy_csum = 0;

                csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
                                     sizeof(le_group));
                csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
                csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
                                     sizeof(dummy_csum));
                offset += sizeof(dummy_csum);
                if (offset < sbi->s_desc_size)
                        csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
                                             sbi->s_desc_size - offset);

                crc = csum32 & 0xFFFF;
                goto out;
        }

        /* old crc16 code */
        if (!ext4_has_feature_gdt_csum(sb))
                return 0;

        crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
        crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
        crc = crc16(crc, (__u8 *)gdp, offset);
        offset += sizeof(gdp->bg_checksum); /* skip checksum */
        /* for checksum of struct ext4_group_desc do the rest...*/
        if (ext4_has_feature_64bit(sb) &&
            offset < le16_to_cpu(sbi->s_es->s_desc_size))
                crc = crc16(crc, (__u8 *)gdp + offset,
                            le16_to_cpu(sbi->s_es->s_desc_size) -
                                offset);
out:
        return cpu_to_le16(crc);
}
int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
                                struct ext4_group_desc *gdp)
{
        if (ext4_has_group_desc_csum(sb) &&
            (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
                return 0;

        return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
                              struct ext4_group_desc *gdp)
{
        if (!ext4_has_group_desc_csum(sb))
                return;

        gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}
/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
                                  ext4_fsblk_t sb_block,
                                  ext4_group_t *first_not_zeroed)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
        ext4_fsblk_t last_block;
        ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
        ext4_fsblk_t block_bitmap;
        ext4_fsblk_t inode_bitmap;
        ext4_fsblk_t inode_table;
        int flexbg_flag = 0;
        ext4_group_t i, grp = sbi->s_groups_count;

        if (ext4_has_feature_flex_bg(sb))
                flexbg_flag = 1;

        ext4_debug("Checking group descriptors");

        for (i = 0; i < sbi->s_groups_count; i++) {
                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

                if (i == sbi->s_groups_count - 1 || flexbg_flag)
                        last_block = ext4_blocks_count(sbi->s_es) - 1;
                else
                        last_block = first_block +
                                (EXT4_BLOCKS_PER_GROUP(sb) - 1);

                if ((grp == sbi->s_groups_count) &&
                    !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
                        grp = i;

                block_bitmap = ext4_block_bitmap(sb, gdp);
                if (block_bitmap == sb_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Block bitmap for group %u overlaps "
                                 "superblock", i);
                        if (!sb_rdonly(sb))
                                return 0;
                }
                if (block_bitmap >= sb_block + 1 &&
                    block_bitmap <= last_bg_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Block bitmap for group %u overlaps "
                                 "block group descriptors", i);
                        if (!sb_rdonly(sb))
                                return 0;
                }
                if (block_bitmap < first_block || block_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Block bitmap for group %u not in group "
                               "(block %llu)!", i, block_bitmap);
                        return 0;
                }
                inode_bitmap = ext4_inode_bitmap(sb, gdp);
                if (inode_bitmap == sb_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Inode bitmap for group %u overlaps "
                                 "superblock", i);
                        if (!sb_rdonly(sb))
                                return 0;
                }
                if (inode_bitmap >= sb_block + 1 &&
                    inode_bitmap <= last_bg_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Inode bitmap for group %u overlaps "
                                 "block group descriptors", i);
                        if (!sb_rdonly(sb))
                                return 0;
                }
                if (inode_bitmap < first_block || inode_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Inode bitmap for group %u not in group "
                               "(block %llu)!", i, inode_bitmap);
                        return 0;
                }
                inode_table = ext4_inode_table(sb, gdp);
                if (inode_table == sb_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Inode table for group %u overlaps "
                                 "superblock", i);
                        if (!sb_rdonly(sb))
                                return 0;
                }
                if (inode_table >= sb_block + 1 &&
                    inode_table <= last_bg_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Inode table for group %u overlaps "
                                 "block group descriptors", i);
                        if (!sb_rdonly(sb))
                                return 0;
                }
                if (inode_table < first_block ||
                    inode_table + sbi->s_itb_per_group - 1 > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Inode table for group %u not in group "
                               "(block %llu)!", i, inode_table);
                        return 0;
                }
                ext4_lock_group(sb, i);
                if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Checksum for group %u failed (%u!=%u)",
                                 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
                                     gdp)), le16_to_cpu(gdp->bg_checksum));
                        if (!sb_rdonly(sb)) {
                                ext4_unlock_group(sb, i);
                                return 0;
                        }
                }
                ext4_unlock_group(sb, i);
                if (!flexbg_flag)
                        first_block += EXT4_BLOCKS_PER_GROUP(sb);
        }
        if (NULL != first_not_zeroed)
                *first_not_zeroed = grp;
        return 1;
}
/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete
 * these inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext4_orphan_cleanup(struct super_block *sb,
                                struct ext4_super_block *es)
{
        unsigned int s_flags = sb->s_flags;
        int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
        int quota_update = 0;
        int i;
#endif
        if (!es->s_last_orphan) {
                jbd_debug(4, "no orphan inodes to clean up\n");
                return;
        }

        if (bdev_read_only(sb->s_bdev)) {
                ext4_msg(sb, KERN_ERR, "write access "
                        "unavailable, skipping orphan cleanup");
                return;
        }

        /* Check if feature set would not allow a r/w mount */
        if (!ext4_feature_set_ok(sb, 0)) {
                ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
                         "unknown ROCOMPAT features");
                return;
        }

        if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
                /* don't clear list on RO mount w/ errors */
                if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
                        ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
                                  "clearing orphan list.\n");
                        es->s_last_orphan = 0;
                }
                jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
                return;
        }

        if (s_flags & SB_RDONLY) {
                ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
                sb->s_flags &= ~SB_RDONLY;
        }
#ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
        sb->s_flags |= SB_ACTIVE;

        /*
         * Turn on quotas which were not enabled for read-only mounts if
         * filesystem has quota feature, so that they are updated correctly.
         */
        if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
                int ret = ext4_enable_quotas(sb);

                if (!ret)
                        quota_update = 1;
                else
                        ext4_msg(sb, KERN_ERR,
                                "Cannot turn on quotas: error %d", ret);
        }
        /* Turn on journaled quotas used for old-style quota files */
        for (i = 0; i < EXT4_MAXQUOTAS; i++) {
                if (EXT4_SB(sb)->s_qf_names[i]) {
                        int ret = ext4_quota_on_mount(sb, i);

                        if (!ret)
                                quota_update = 1;
                        else
                                ext4_msg(sb, KERN_ERR,
                                        "Cannot turn on journaled "
                                        "quota: type %d: error %d", i, ret);
                }
        }
  2329. #endif
  2330. while (es->s_last_orphan) {
  2331. struct inode *inode;
  2332. /*
  2333. * We may have encountered an error during cleanup; if
  2334. * so, skip the rest.
  2335. */
  2336. if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
  2337. jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
  2338. es->s_last_orphan = 0;
  2339. break;
  2340. }
  2341. inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
  2342. if (IS_ERR(inode)) {
  2343. es->s_last_orphan = 0;
  2344. break;
  2345. }
  2346. list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
  2347. dquot_initialize(inode);
  2348. if (inode->i_nlink) {
  2349. if (test_opt(sb, DEBUG))
  2350. ext4_msg(sb, KERN_DEBUG,
  2351. "%s: truncating inode %lu to %lld bytes",
  2352. __func__, inode->i_ino, inode->i_size);
  2353. jbd_debug(2, "truncating inode %lu to %lld bytes\n",
  2354. inode->i_ino, inode->i_size);
  2355. inode_lock(inode);
  2356. truncate_inode_pages(inode->i_mapping, inode->i_size);
  2357. ret = ext4_truncate(inode);
  2358. if (ret)
  2359. ext4_std_error(inode->i_sb, ret);
  2360. inode_unlock(inode);
  2361. nr_truncates++;
  2362. } else {
  2363. if (test_opt(sb, DEBUG))
  2364. ext4_msg(sb, KERN_DEBUG,
  2365. "%s: deleting unreferenced inode %lu",
  2366. __func__, inode->i_ino);
  2367. jbd_debug(2, "deleting unreferenced inode %lu\n",
  2368. inode->i_ino);
  2369. nr_orphans++;
  2370. }
  2371. iput(inode); /* The delete magic happens here! */
  2372. }
  2373. #define PLURAL(x) (x), ((x) == 1) ? "" : "s"
  2374. if (nr_orphans)
  2375. ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
  2376. PLURAL(nr_orphans));
  2377. if (nr_truncates)
  2378. ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
  2379. PLURAL(nr_truncates));
  2380. #ifdef CONFIG_QUOTA
  2381. /* Turn off quotas if they were enabled for orphan cleanup */
  2382. if (quota_update) {
  2383. for (i = 0; i < EXT4_MAXQUOTAS; i++) {
  2384. if (sb_dqopt(sb)->files[i])
  2385. dquot_quota_off(sb, i);
  2386. }
  2387. }
  2388. #endif
  2389. sb->s_flags = s_flags; /* Restore SB_RDONLY status */
  2390. }
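
/*
 * Illustrative note (not part of the original file): the PLURAL() macro
 * above expands to two printf-style arguments -- the count and an "s"
 * suffix -- so a single format string covers both singular and plural,
 * e.g. assuming nr_orphans values of 1 and 3:
 *
 *	ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(1));
 *		-> "1 orphan inode deleted"
 *	ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(3));
 *		-> "3 orphan inodes deleted"
 */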

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However there is another limiting factor.  We do store extents in the
 * form of starting block and length, hence the resulting length of the
 * extent covering maximum file size must fit into on-disk format
 * containers as well.  Given that the stored length is always one unit
 * bigger than the maximum starting unit (because block 0 counts too),
 * we have to lower the s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	/* small i_blocks in vfs inode? */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_blocks count is a 32-bit value expressed in
		 * 512-byte units (32 == size of vfs inode i_blocks * 8).
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block.  We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
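
/*
 * Worked example (illustrative, not part of the original file), assuming
 * 4KiB blocks (blkbits == 12):
 *
 *	with huge_file and a 64-bit blkcnt_t:
 *		res = ((1LL << 32) - 1) << 12 = 2^44 - 2^12
 *		      (16 TiB minus one block)
 *	without huge_file (32-bit i_blocks in 512-byte units):
 *		upper_limit = (((1LL << 32) - 1) >> 3) << 12 ~= 2^41 (~2 TiB)
 *
 * so an extent-mapped file on a 4KiB-block filesystem is capped just
 * under 16 TiB, or at roughly 2 TiB without huge_file support.
 */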

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;

	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total file blocks in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
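
/*
 * Worked example (illustrative, not part of the original file), assuming
 * 4KiB blocks (bits == 12, i.e. 2^10 block pointers per indirect block):
 *
 *	res = 12 + 2^10 + 2^20 + 2^30 blocks ~= 2^30 blocks
 *	res <<= 12  ->  ~2^42 bytes  (about 4 TiB)
 *
 * so an indirect-mapped file on a 4KiB-block filesystem tops out around
 * 4 TiB, well below the extent-format limit computed above.
 */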

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;
	return (has_super + ext4_group_first_block_no(sb, bg));
}
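
/*
 * Illustrative sketch (not part of the original file): without meta_bg,
 * the group descriptor blocks simply follow the superblock, so with the
 * usual sb_block == 1 on a 1KiB-block filesystem:
 *
 *	descriptor_loc(sb, 1, 0) == 2
 *	descriptor_loc(sb, 1, 1) == 3, and so on.
 *
 * Only once nr reaches s_first_meta_bg does the meta_bg layout kick in
 * and the descriptor block move into its own block group.
 */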

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value.  If the value specified at mount time is
 * greater than the blocks per group, use the super block value.
 * If the super block value is greater than blocks per group, return 0.
 * The allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}
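
/*
 * Illustrative sketch (not part of the original file) of the precedence
 * implemented above -- mount option first, then RAID stripe width, then
 * RAID stride, with a value of 1 (or anything larger than a block group)
 * disabling striping entirely:
 *
 *	s_stripe == 256 (mount option)			-> 256
 *	s_stripe == 0, stripe_width == 128		-> 128
 *	s_stripe == 0, stripe_width == 0, stride == 1	-> 0 (striping off)
 */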

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * A filesystem with the huge_file feature enabled can only be
	 * mounted read-write on 32-bit systems if the kernel is built
	 * with CONFIG_LBDAF.
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#ifndef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
	if (ext4_has_feature_project(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with project quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}
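
/*
 * Illustrative note (not part of the original file): the first time a
 * request runs, lr_timeout is 0, so the time actually spent zeroing one
 * inode table is measured and multiplied by s_li_wait_mult
 * (EXT4_DEF_LI_WAIT_MULT by default).  So if zeroing one group's table
 * took 2 seconds, the next group is scheduled ~2 * s_li_wait_mult
 * seconds later, throttling lazy init to a small fraction of the disk's
 * time.
 */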

/*
 * Remove lr_request from the request list and free the
 * request structure.  Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives.  It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function.  Based on that time we compute the next schedule time of
 * the request.  When walking through the list is complete, compute the
 * next waking time and put itself into sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;

			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * a new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init. thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	if (!ext4_has_group_desc_csum(sb))
		return ngroups;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
						   ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted into
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}
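
/*
 * Illustrative summary (not part of the original file) of the journal
 * feature bits chosen above:
 *
 *	journal_checksum + metadata_csum	-> set csum v3
 *	journal_checksum, no metadata_csum	-> set csum v1
 *	journal_async_commit			-> additionally ASYNC_COMMIT
 *	neither option				-> checksum and async-commit
 *						   bits left cleared, ret == 1
 */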

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block groups can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the overhead at mount
 * time for older file systems --- and if we come across a bigalloc
 * file system with zero in s_overhead_clusters the estimate will be
 * close to correct especially for very large cluster sizes --- but for
 * newer file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp;
	ext4_fsblk_t first_block, last_block, b;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	int s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we cannot afford to run
	 * out of space, like for example punch hole, or converting
	 * unwritten extents in the delalloc path.  In most cases such an
	 * allocation would require one or two blocks; higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}
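
/*
 * Worked example (illustrative, not part of the original file): a 1 TiB
 * filesystem with 4KiB clusters has 2^28 clusters, so 2% would be ~5.3
 * million clusters -- far above the cap, so the 4096-cluster cap applies
 * (16 MiB reserved).  Only filesystems under roughly 800 MiB end up
 * reserving the full 2%.
 */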

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files, has_bigalloc;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_daxdev = dax_dev;
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sb_block;
	}

	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
		ret = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto failed_mount;
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing, be silent if this looks like
			 * it's actually an ext[34] filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				"using the ext4 subsystem");
		else {
			/*
			 * If we're probing, be silent if this looks like
			 * it's actually an ext4 filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
		goto failed_mount;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}

	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_cluster_size) >
	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log cluster size: %u",
			 le32_to_cpu(es->s_log_cluster_size));
		goto failed_mount;
	}

	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
		if (ext4_has_feature_inline_data(sb)) {
			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
					" that may contain inline data");
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
		}
		if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
			ext4_msg(sb, KERN_ERR,
				"DAX unsupported by block device. Turning off DAX.");
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
		}
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = sb_bread_unmovable(sb, logical_sb_block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
				 sbi->s_first_ino);
			goto failed_mount;
		}
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
	}

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	has_bigalloc = ext4_has_feature_bigalloc(sb);
	if (has_bigalloc) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "fragment/cluster size (%d) != "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);
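
	/*
	 * Illustrative note (not part of the original file): with bigalloc
	 * and, say, 64KiB clusters on 4KiB blocks, s_cluster_bits == 4 and
	 * s_cluster_ratio == 16; the "standard" group size check above then
	 * expects blocksize * 8 clusters per group, i.e. clustersize * 8 ==
	 * 524288 blocks per group.
	 */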

	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
	    (sbi->s_cluster_ratio == 1)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block is 0 with a 1k block and cluster size");
		goto failed_mount;
	}

	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	sbi->s_group_desc = kvmalloc_array(db_count,
					   sizeof(struct buffer_head *),
					   GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}
	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
			 le32_to_cpu(es->s_inodes_count),
			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
		ret = -EINVAL;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sb_breadahead(sb, block);
	}

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	sbi->s_gdb_count = db_count;
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
  3714. /* Register extent status tree shrinker */
  3715. if (ext4_es_register_shrinker(sbi))
  3716. goto failed_mount3;
  3717. sbi->s_stripe = ext4_get_stripe_size(sbi);
  3718. sbi->s_extent_max_zeroout_kb = 32;
  3719. /*
  3720. * set up enough so that it can read an inode
  3721. */
  3722. sb->s_op = &ext4_sops;
  3723. sb->s_export_op = &ext4_export_ops;
  3724. sb->s_xattr = ext4_xattr_handlers;
  3725. #ifdef CONFIG_EXT4_FS_ENCRYPTION
  3726. sb->s_cop = &ext4_cryptops;
  3727. #endif
  3728. #ifdef CONFIG_QUOTA
  3729. sb->dq_op = &ext4_quota_operations;
  3730. if (ext4_has_feature_quota(sb))
  3731. sb->s_qcop = &dquot_quotactl_sysfile_ops;
  3732. else
  3733. sb->s_qcop = &ext4_qctl_operations;
  3734. sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
  3735. #endif
  3736. memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
  3737. INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
  3738. mutex_init(&sbi->s_orphan_lock);
  3739. sb->s_root = NULL;
  3740. needs_recovery = (es->s_last_orphan != 0 ||
  3741. ext4_has_feature_journal_needs_recovery(sb));
  3742. if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
  3743. if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
  3744. goto failed_mount3a;
  3745. /*
  3746. * The first inode we look at is the journal inode. Don't try
  3747. * root first: it may be modified in the journal!
  3748. */
  3749. if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
  3750. err = ext4_load_journal(sb, es, journal_devnum);
  3751. if (err)
  3752. goto failed_mount3a;
  3753. } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
  3754. ext4_has_feature_journal_needs_recovery(sb)) {
  3755. ext4_msg(sb, KERN_ERR, "required journal recovery "
  3756. "suppressed and not mounted read-only");
  3757. goto failed_mount_wq;
  3758. } else {
  3759. /* Nojournal mode, all journal mount options are illegal */
  3760. if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
  3761. ext4_msg(sb, KERN_ERR, "can't mount with "
  3762. "journal_checksum, fs mounted w/o journal");
  3763. goto failed_mount_wq;
  3764. }
  3765. if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
  3766. ext4_msg(sb, KERN_ERR, "can't mount with "
  3767. "journal_async_commit, fs mounted w/o journal");
  3768. goto failed_mount_wq;
  3769. }
  3770. if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
  3771. ext4_msg(sb, KERN_ERR, "can't mount with "
  3772. "commit=%lu, fs mounted w/o journal",
  3773. sbi->s_commit_interval / HZ);
  3774. goto failed_mount_wq;
  3775. }
  3776. if (EXT4_MOUNT_DATA_FLAGS &
  3777. (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
  3778. ext4_msg(sb, KERN_ERR, "can't mount with "
  3779. "data=, fs mounted w/o journal");
  3780. goto failed_mount_wq;
  3781. }
  3782. sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			set_opt(sb, ORDERED_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
		} else {
			set_opt(sb, JOURNAL_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
		}
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
				 "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			 "journal_async_commit in data=ordered mode");
		goto failed_mount_wq;
	}

	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

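	/*
	 * Past this point the filesystem must work with or without a
	 * journal: the nojournal branch above jumps straight to the
	 * no_journal label, so nothing below may assume sbi->s_journal
	 * is non-NULL.
	 */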
no_journal:
	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
				 "Failed to create ea_block_cache");
			goto failed_mount_wq;
		}

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				goto failed_mount_wq;
			}
		}
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */
	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	ret = ext4_setup_super(sb, es, sb_rdonly(sb));
	if (ret == -EROFS) {
		sb->s_flags |= SB_RDONLY;
		ret = 0;
	} else if (ret)
		goto failed_mount4a;

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
	    sbi->s_want_extra_isize == 0) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						     EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO, "required extra inode space not "
			 "available");
	}

	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	ext4_superblock_csum_set(sb);
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		ext4_superblock_csum_set(sb);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "unable to initialize "
				 "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

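/*
 * Open the internal journal inode and wrap it in a jbd2 journal handle,
 * applying the superblock-derived journal parameters along the way.
 */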
static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

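/*
 * Open an external journal device, sanity-check its superblock (magic,
 * JOURNAL_DEV feature flag, checksum, UUID match against the filesystem)
 * and hand back a jbd2 handle for it.
 */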
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			 "blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
			 "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
			 "bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
			 "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
			 "user (unsupported) - %d",
			 be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

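/*
 * Locate the journal (internal inode or external device), load and if
 * necessary replay it, then attach it to the mounting filesystem.
 */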
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			 "numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
				 "required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					 "unavailable, cannot proceed "
					 "(try mounting with noload)");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
				 "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
			 "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}

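/*
 * Write the in-memory superblock back to disk, refreshing the write time,
 * kbytes-written and free block/inode counters plus the checksum first;
 * with @sync, wait for the write to complete.
 */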
static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;

	/*
	 * The superblock bh should be mapped, but it might not be if the
	 * device was hot-removed. Not much we can do but fail the I/O.
	 */
	if (!buffer_mapped(sbh))
		return error;

	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		ext4_update_tstamp(es, s_wtime);
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part,
					     sectors[STAT_WRITE]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
			 "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (buffer_write_io_error(sbh)) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
				 "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

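/*
 * Flush pending reserved-extent conversions and dquots, kick (and, with
 * @wait, wait for) a journal commit, and issue a cache flush ourselves
 * when the commit will not send one for us.
 */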
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so the barrier
	 * must be sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function cannot bring the filesystem to a clean state by
 * itself.  It relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

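/*
 * Re-parse the mount options, snapshotting the old ones first so that any
 * failure can restore them, then handle the ro<->rw transitions, including
 * the recovery-flag, MMP and quota state changes they imply.
 */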
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
	char *to_free[EXT4_MAXQUOTAS];
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	if (data && !orig_data)
		return -ENOMEM;

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			char *qf_name = get_qf_name(sb, sbi, i);

			old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			 "dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & SB_LAZYTIME)
		sb->s_flags |= SB_LAZYTIME;

	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
			if (sbi->s_mmp_tsk)
				kthread_stop(sbi->s_mmp_tsk);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
		"ext4_remount: Checksum for group %u failed (%u!=%u)",
						 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
						 le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
					 "remount RDWR because of unprocessed "
					 "orphan inode list.  Please "
					 "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);

			err = ext4_setup_super(sb, es, 0);
			if (err)
				goto restore_opts;

			sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
		err = ext4_commit_super(sb, 1);
		if (err)
			goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		to_free[i] = get_qf_name(sb, sbi, i);
		rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
	}
	synchronize_rcu();
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(to_free[i]);
#endif
	kfree(orig_data);
	return err;
}

#ifdef CONFIG_QUOTA
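/*
 * Clamp the statfs block and inode figures to the project quota limits, so
 * that a directory under project quota reports the quota as its capacity.
 */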
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

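/*
 * Report filesystem capacity and free space, subtracting the metadata
 * overhead and reserved clusters so that userspace sees usable space.
 */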
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}

#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				 "Quota file not on filesystem root. "
				 "Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}

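/*
 * Enable one quota type backed by the quota inode recorded in the
 * superblock (filesystems with the quota feature keep the quota files
 * as reserved inodes rather than visible files).
 */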
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				return err;
			}
		}
	}
	return 0;
}

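/*
 * Disable one quota type; for setups with visible quota files (no quota
 * feature) also drop the NOATIME/IMMUTABLE protection and refresh the
 * quota file's timestamps for userspace.
 */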
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}

static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops *ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

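/*
 * Thin wrapper that plugs ext4_fill_super into the generic block-device
 * mount path.
 */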
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

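/*
 * Module init: bring up the shared ext4 subsystems in dependency order,
 * then register the filesystem (and its ext2/ext3 aliases); each failure
 * path unwinds exactly the steps that already succeeded.
 */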
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pending();
	if (err)
		goto out6;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;

	err = init_inodecache();
	if (err)
		goto out1;

	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_pending();
out6:
	ext4_exit_es();
	return err;
}

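/*
 * Module teardown: unregister the filesystem types and shut down the
 * shared subsystems brought up in ext4_init_fs().
 */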
static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
	ext4_exit_pending();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)