/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   transaction start -> i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) -> mmap_sem
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) ->
 *   transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */
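
/*
 * A hedged illustration of the ordering above (editorial sketch, not part
 * of the original file): a buffered write, for example, nests roughly as
 *
 *	sb_start_write(sb);
 *	inode_lock(inode);				// i_mutex
 *	handle = ext4_journal_start(inode, ...);	// transaction start
 *	lock_page(page);
 *	down_write(&EXT4_I(inode)->i_data_sem);		// i_data_sem (rw)
 *
 * Taking any pair of these locks in the opposite order risks deadlock.
 */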
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif

static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}
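
/*
 * Editorial note (illustrative, not from the original source): the
 * checksum is computed over the superblock only up to
 * offsetof(struct ext4_super_block, s_checksum), so the s_checksum field
 * itself never feeds the CRC.  Writing the result into es->s_checksum
 * therefore does not invalidate it, and verification can simply
 * recompute over the same range and compare:
 *
 *	es->s_checksum == ext4_superblock_csum(sb, es)
 */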
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}
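
/*
 * Hedged usage sketch (editorial): these helpers serve allocations that
 * may be too large for the slab allocator; kmalloc() is tried first with
 * warnings suppressed, and vmalloc() is the fallback.  A caller might do
 *
 *	buf = ext4_kvmalloc(count * sizeof(*buf), GFP_NOFS);
 *	...
 *	kvfree(buf);		// copes with either allocation path
 *
 * (kvfree() is used the same way on s_group_desc in ext4_put_super()
 * below.)
 */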
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
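
/*
 * A worked example of the lo/hi split handled by the accessors above
 * (editorial, illustrative only): with a descriptor size of at least
 * EXT4_MIN_DESC_SIZE_64BIT, a 33-bit block number such as 0x123456789
 * is stored as bg_block_bitmap_lo = 0x23456789 and
 * bg_block_bitmap_hi = 0x1, and read back as
 *
 *	lo | ((ext4_fsblk_t)hi << 32)
 *
 * Smaller (pre-64-bit) descriptors carry only the low 32 bits.
 */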
static void __save_error_info(struct super_block *sb, const char *func,
			      unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */
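
/*
 * Hedged sketch (editorial) of the round trip described above, using the
 * jbd2 API:
 *
 *	jbd2_journal_abort(journal, -EIO);	// record errno in the journal
 *	...
 *	err = jbd2_journal_errno(journal);	// read it back after recovery
 *	jbd2_journal_clear_err(journal);	// ack once noted in the sb
 *
 * The forward-declared ext4_clear_journal_err() handles the
 * read-back-and-clear half when the filesystem is mounted again.
 */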
static void ext4_handle_error(struct super_block *sb)
{
	if (sb_rdonly(sb))
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}
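
/*
 * Hedged usage sketch (editorial): callers normally reach this through
 * the ext4_std_error() macro defined in ext4.h, which fills in the
 * function name and line number automatically, e.g.
 *
 *	err = ext4_journal_get_write_access(handle, bh);
 *	if (err)
 *		ext4_std_error(sb, err);
 */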
/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */
void __ext4_abort(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if (sb_rdonly(sb) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}
void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;

	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int i, err;

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	flush_workqueue(sbi->rsv_conversion_wq);
	destroy_workqueue(sbi->rsv_conversion_wq);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	ext4_unregister_sysfs(sb);
	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb, 1);

	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
	kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want
		 * them floating about in memory - the physical journal
		 * device may be hotswapped, and it breaks the `ro-after'
		 * testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev);
	kfree(sbi);
}
  838. static struct kmem_cache *ext4_inode_cachep;
  839. /*
  840. * Called inside transaction, so use GFP_NOFS
  841. */
  842. static struct inode *ext4_alloc_inode(struct super_block *sb)
  843. {
  844. struct ext4_inode_info *ei;
  845. ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
  846. if (!ei)
  847. return NULL;
  848. ei->vfs_inode.i_version = 1;
  849. spin_lock_init(&ei->i_raw_lock);
  850. INIT_LIST_HEAD(&ei->i_prealloc_list);
  851. spin_lock_init(&ei->i_prealloc_lock);
  852. ext4_es_init_tree(&ei->i_es_tree);
  853. rwlock_init(&ei->i_es_lock);
  854. INIT_LIST_HEAD(&ei->i_es_list);
  855. ei->i_es_all_nr = 0;
  856. ei->i_es_shk_nr = 0;
  857. ei->i_es_shrink_lblk = 0;
  858. ei->i_reserved_data_blocks = 0;
  859. ei->i_da_metadata_calc_len = 0;
  860. ei->i_da_metadata_calc_last_lblock = 0;
  861. spin_lock_init(&(ei->i_block_reservation_lock));
  862. #ifdef CONFIG_QUOTA
  863. ei->i_reserved_quota = 0;
  864. memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
  865. #endif
  866. ei->jinode = NULL;
  867. INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
  868. spin_lock_init(&ei->i_completed_io_lock);
  869. ei->i_sync_tid = 0;
  870. ei->i_datasync_tid = 0;
  871. atomic_set(&ei->i_unwritten, 0);
  872. INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
  873. return &ei->vfs_inode;
  874. }
  875. static int ext4_drop_inode(struct inode *inode)
  876. {
  877. int drop = generic_drop_inode(inode);
  878. trace_ext4_drop_inode(inode, drop);
  879. return drop;
  880. }
  881. static void ext4_i_callback(struct rcu_head *head)
  882. {
  883. struct inode *inode = container_of(head, struct inode, i_rcu);
  884. kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
  885. }
  886. static void ext4_destroy_inode(struct inode *inode)
  887. {
  888. if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
  889. ext4_msg(inode->i_sb, KERN_ERR,
  890. "Inode %lu (%p): orphan list check failed!",
  891. inode->i_ino, EXT4_I(inode));
  892. print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
  893. EXT4_I(inode), sizeof(struct ext4_inode_info),
  894. true);
  895. dump_stack();
  896. }
  897. call_rcu(&inode->i_rcu, ext4_i_callback);
  898. }
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
					      sizeof(struct ext4_inode_info),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					      init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}
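
/*
 * Called from the inode eviction path: drop cached buffers and quota
 * references, discard preallocated blocks and cached extent status, and
 * release the jbd2 inode and any encryption key reference.
 */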
void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	fscrypt_put_encryption_info(inode, NULL);
#endif
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext4_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for the parent directory,
	 * so a generation of 0 means "accept any".
	 */
	inode = ext4_iget_normal(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * the jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page,
						wait & ~__GFP_DIRECT_RECLAIM);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
			    void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well
	 * as the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context
	 * is being set on an existing inode in its own transaction.  Only in
	 * the latter case should the "retry on ENOSPC" logic be used.
	 */
	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
			 */
			ext4_set_inode_flags(inode);
		}
		return res;
	}

	res = dquot_initialize(inode);
	if (res)
		return res;
retry:
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		/*
		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
		 * S_DAX may be disabled
		 */
		ext4_set_inode_flags(inode);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}

static bool ext4_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static unsigned ext4_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
		EXT4_NAME_LEN;
}

static const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.dummy_context		= ext4_dummy_context,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= ext4_max_namelen,
};
#endif

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"},	/* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};
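
/*
 * Parse and consume a leading "sb=<blocknr>" from the mount data string,
 * returning the requested superblock location, or 1 (the default) if no
 * valid specification is present.
 */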
static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;
	int ret = -1;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 1;
}
#endif
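
/*
 * Flags describing how handle_mount_opt() treats each mount option:
 * whether it sets or clears bits in s_mount_opt, takes a non-negative
 * integer or a string argument, is quota-related, or must be rejected
 * when the filesystem is mounted as ext2 or ext3.
 */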
#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
	 MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_err, 0, 0}
};
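
/*
 * Handle a single mount option.  Returns 1 if the option was consumed and
 * -1 on error; simple flag options toggle bits in s_mount_opt, while
 * options carrying an argument validate it before updating the relevant
 * ext4_sb_info field.
 */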
static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
		return 1;
	case Opt_i_version:
		sb->s_flags |= SB_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= SB_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~SB_LAZYTIME;
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_debug_want_extra_isize) {
		sbi->s_want_extra_isize = arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mode enabled");
#else
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mount option ignored");
#endif
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
		ext4_msg(sb, KERN_WARNING,
			 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		sbi->s_mount_opt |= m->mount_opt;
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (arg != 0)
			sbi->s_mount_opt |= m->mount_opt;
		else
			sbi->s_mount_opt &= ~m->mount_opt;
	}
	return 1;
}
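
/*
 * Parse the comma-separated mount option string.  Returns 1 on success,
 * 0 on failure; cross-option consistency (mixing old and new quota
 * formats, project quota without the feature flag, dioread_nolock with a
 * block size smaller than the page size) is checked here after all
 * individual options have been handled.
 */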
static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

		if (blocksize < PAGE_SIZE) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "dioread_nolock if block size != PAGE_SIZE");
			return 0;
		}
	}
	return 1;
}

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
#endif
}
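
/*
 * Map a token back to its option name for show_options output; patterns
 * containing '=' are skipped so that the bare option name is returned.
 */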
static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR))
			continue;
		if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & SB_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}
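
/*
 * Finish superblock setup on a r/w mount: warn when running e2fsck is
 * advisable, bump the mount count and mtime, flag the journal as needing
 * recovery, and write the superblock back out.  Returns SB_RDONLY when the
 * revision level forces a read-only mount, 0 otherwise.
 */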
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		res = SB_RDONLY;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (le32_to_cpu(es->s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return res;
}
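
/*
 * Grow (never shrink) the per-flex-group counter array so that it covers
 * at least ngroup block groups; the allocation is rounded up to a power
 * of two and any existing counters are copied across.
 */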
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups *new_groups;
	int size;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
	new_groups = kvzalloc(size, GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
			 size / (int) sizeof(struct flex_groups));
		return -ENOMEM;
	}

	if (sbi->s_flex_groups) {
		memcpy(new_groups, sbi->s_flex_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups)));
		kvfree(sbi->s_flex_groups);
	}
	sbi->s_flex_groups = new_groups;
	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
	return 0;
}

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		atomic_add(ext4_free_inodes_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].used_dirs);
	}

	return 1;
failed:
	return 0;
}
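
/*
 * Compute the group descriptor checksum.  With metadata_csum, the whole
 * descriptor (minus the checksum field itself) is folded into a crc32c
 * seeded with s_csum_seed; otherwise, with gdt_csum, the legacy crc16
 * over the UUID, the group number and the descriptor is used.
 */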
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		    !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete
 * these inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message
 * from ext4_free_inode().  The only reason we would point at a wrong inode
 * is if e2fsck was run on this filesystem, and it must have already done
 * the orphan inode cleanup for us, so we can safely abort without any
 * further action.
 */
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & SB_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~SB_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old style quota files */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret)
				ext4_std_error(inode->i_sb, ret);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor.  We do store extents in the
 * form of a starting block and a length, hence the resulting length of
 * the extent covering the maximum file size must fit into on-disk format
 * containers as well.  Given that the length is always one unit bigger
 * than the largest coverable unit (because we count 0 as well), we have
 * to lower s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	/* small i_blocks in vfs inode? */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled means that
		 * the inode i_blocks field counts 512-byte sectors in a
		 * 32-bit value (sizeof(vfs inode's i_blocks) * 8 == 32).
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block.  We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
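
/*
 * For example, with 4 KiB blocks (blkbits == 12) the extent limit above
 * works out to ((2^32 - 1) << 12), i.e. 16 TiB minus one block; without
 * huge_files (or with a 32-bit blkcnt_t), the 512-byte-sector i_blocks
 * limit caps it at roughly 2 TiB instead.
 */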
/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_blocks field represents total file size in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
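
/*
 * Worked example (illustrative): with 4KiB blocks (bits == 12), each
 * indirect block holds 2^(12-2) = 1024 block numbers, so
 * res = 12 + 1024 + 1024^2 + 1024^3 blocks, or about 4 TiB once shifted
 * by the block size -- the familiar limit for indirect-mapped ext4 files.
 */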
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
		has_super++;
	return (has_super + ext4_group_first_block_no(sb, bg));
}
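
/*
 * Illustration (not from the original source): without meta_bg, descriptor
 * block nr simply follows the primary superblock, hence
 * logical_sb_block + nr + 1.  With meta_bg, the descriptors for a "meta
 * group" of s_desc_per_block groups live inside that meta group itself,
 * so block nr is found at the first block of group
 * (s_desc_per_block * nr), after any backup superblock there.
 */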
/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value.  If the value specified at mount time is
 * greater than the blocks per group, use the super block value.
 * If the super block value is greater than blocks per group, return 0.
 * The allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}
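
/*
 * Note (illustrative): the precedence is the stripe= mount option first,
 * then the s_raid_stripe_width and s_raid_stride hints recorded by mke2fs;
 * any candidate larger than blocks-per-group is skipped because the
 * multi-block allocator aligns stripes within a single group.
 */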
/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * Large file size enabled file system can only be mounted
	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature");
		return 0;
	}

#ifndef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
	if (ext4_has_feature_project(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with project quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}
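
/*
 * Background note (illustrative): ext4 divides feature flags into compat
 * (mountable even if unknown), ro_compat (unknown flags permit read-only
 * mounts only) and incompat (unknown flags refuse the mount entirely);
 * the checks above implement exactly that policy for the requested mode.
 */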
/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}
/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}
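
/*
 * Scheduling note (illustrative): the first time a group's inode table is
 * zeroed, the elapsed time is multiplied by s_li_wait_mult (set from
 * EXT4_DEF_LI_WAIT_MULT, or the init_itable=n mount option) and reused as
 * the delay before the next group; e.g. if zeroing took 100ms and the
 * multiplier is 10, the next group runs roughly a second later, keeping
 * lazy init to a small fraction of the disk's attention.
 */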
/*
 * Remove lr_request from the request list and free the
 * request structure. Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}
static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives. It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function. Based on that time we compute the next schedule time of
 * the request. When walking through the list is complete, compute the
 * next waking time and put itself to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;

			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * a new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}
static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}
/*
 * Check whether it makes sense to run the itable init. thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}
static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}
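
/*
 * Note (illustrative): the random delay spreads the initial runs of many
 * filesystems mounted at boot across the EXT4_DEF_LI_MAX_START_DELAY
 * window, so a dozen mounts do not all start zeroing their inode tables
 * at the same instant.
 */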
int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted into
	 * the request_list and its removal and freeing are
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}
/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}
static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}
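
/*
 * Background (illustrative): checksum v1 uses the JBD2 compat CHECKSUM
 * flag, while v3 (incompat CSUM_V3) checksums journal blocks with the
 * same crc32c driver used for metadata_csum; journal_async_commit needs
 * checksums because the commit block may be written without waiting for
 * the preceding journal blocks, so recovery must be able to validate them.
 */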
/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block groups can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp;
	ext4_fsblk_t first_block, last_block, b;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	int s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}
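
/*
 * Worked example (illustrative): without bigalloc, a group's overhead is
 * simply any superblock backup plus its GDT blocks, the inode table
 * (s_itb_per_group blocks) and the two bitmaps -- the "+ 2" above.  With
 * bigalloc the bitmap in buf is needed because several of those blocks,
 * possibly belonging to different groups, can share one cluster and must
 * not be double counted.
 */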
/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}
static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we cannot afford to run
	 * out of space, for example when punching holes or converting
	 * unwritten extents in the delalloc path.  In most cases such an
	 * allocation would require 1 or 2 blocks; higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}
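
/*
 * Worked example (illustrative): a 1 TiB filesystem with 4KiB clusters
 * has 2^28 clusters; 2% of that is about 5.4M, so the min() clamps the
 * reservation to 4096 clusters (16 MiB).  Only filesystems under roughly
 * 800 MiB end up reserving the full 2%.
 */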
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files, has_bigalloc;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_daxdev = dax_dev;
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sb_block;
	}
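
	/*
	 * Worked example (illustrative): the primary superblock lives at
	 * byte offset 1024.  With a 4KiB device blocksize, sb_block == 1
	 * gives logical_sb_block == 0 and offset == 1024, i.e. the
	 * superblock is the second kilobyte of buffer-cache block 0.
	 */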
	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext4 macros depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	if (ext4_has_feature_metadata_csum(sb) ||
	    ext4_has_feature_ea_inode(sb)) {
		sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
		if (IS_ERR(sbi->s_chksum_driver)) {
			ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
			ret = PTR_ERR(sbi->s_chksum_driver);
			sbi->s_chksum_driver = NULL;
			goto failed_mount;
		}
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));
	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
		goto failed_mount;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}

	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
		if (ext4_has_feature_inline_data(sb)) {
			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
					" that may contain inline data");
			goto failed_mount;
		}
		err = bdev_dax_supported(sb, blocksize);
		if (err)
			goto failed_mount;
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = sb_bread_unmovable(sb, logical_sb_block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
	}

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}
	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	has_bigalloc = ext4_has_feature_bigalloc(sb);
	if (has_bigalloc) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (le32_to_cpu(es->s_log_cluster_size) >
		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid log cluster size: %u",
				 le32_to_cpu(es->s_log_cluster_size));
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_warning(sb, "fragment/cluster size (%d) != "
				     "block size (%d)", clustersize,
				     blocksize);
			clustersize = blocksize;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);
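
	/*
	 * Worked example (illustrative): with 4KiB blocks and 64KiB bigalloc
	 * clusters, s_cluster_bits == 4 and s_cluster_ratio == 16, so a
	 * group of 32768 blocks carries only 2048 clusters in its bitmap.
	 */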
	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
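
	/*
	 * Worked example (illustrative): a 1 TiB filesystem with 4KiB
	 * blocks has 2^28 blocks and 32768 blocks per group, i.e. 8192
	 * groups; with 32-byte descriptors, 128 fit per block, so
	 * db_count == 64 group-descriptor blocks are read at mount.
	 */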
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	sbi->s_group_desc = kvmalloc(db_count *
				     sizeof(struct buffer_head *),
				     GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sb_breadahead(sb, block);
	}

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	sbi->s_gdb_count = db_count;

	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
			set_opt(sb, ORDERED_DATA);
		else
			set_opt(sb, JOURNAL_DATA);
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			"journal_async_commit in data=ordered mode");
		goto failed_mount_wq;
	}

	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
				 "Failed to create ea_block_cache");
			goto failed_mount_wq;
		}

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				goto failed_mount_wq;
			}
		}
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	if (ext4_setup_super(sb, es, sb_rdonly(sb)))
		sb->s_flags |= SB_RDONLY;

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
	    sbi->s_want_extra_isize == 0) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						     EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO,
			 "required extra inode space not available");
	}
	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "unable to initialize "
				 "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
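	/*
	 * Pick a human-readable description of the data journaling mode for
	 * the mount message.  Note the trick in the no-journal case: descr
	 * is set to "out journal" so that the "mounted filesystem with%s"
	 * format string below reads "mounted filesystem without journal".
	 */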
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;
cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}
/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}
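/*
 * Look up and validate the inode backing an internal journal.  Returns a
 * referenced inode on success, or NULL (with an error message logged) if
 * the inode is missing, deleted, or not a regular file.
 */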
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}
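/*
 * Initialize a jbd2 journal that lives in an inode of this filesystem.
 * On success the inode reference is owned by the journal (dropped again
 * by jbd2_journal_destroy()); on failure it is dropped here.
 */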
static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}
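/*
 * Open an external journal device, validate its superblock (magic, the
 * JOURNAL_DEV incompat feature, checksum and UUID match against our own
 * superblock), and initialize a jbd2 journal on it.
 */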
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			 "blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
			 "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
			 "bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
			 "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
			 "user (unsupported) - %d",
			 be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}
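/*
 * Locate the journal (inode-based or on an external device), replay it if
 * recovery is needed, and attach it to the filesystem.  The error block of
 * the on-disk superblock is saved and restored around jbd2_journal_load(),
 * since log replay may overwrite that region with older contents.
 */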
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			 "numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
				 "required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					 "unavailable, cannot proceed "
					 "(try mounting with noload)");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
				 "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
			 "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}
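/*
 * Write the in-memory superblock back to disk, refreshing the free block
 * and free inode counts on the way out.  If @sync is set, the write is
 * issued synchronously (with REQ_FUA when barriers are enabled) and any
 * resulting I/O error is returned.
 */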
static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;

	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
			 "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (error)
			return error;

		error = buffer_write_io_error(sbh);
		if (error) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
				 "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}
/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}
/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}
/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}
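/*
 * ->sync_fs() for ext4: flush pending reserved-extent conversions and
 * dirty (non-journalled) quotas, then either wait on a journal commit
 * or, in nojournal mode, issue a cache flush when barriers are enabled.
 */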
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o a journal transaction, so a barrier
	 * must be sent at the end of the function.  But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}
/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function cannot bring the filesystem to a clean state by
 * itself.  It relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}
/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}
/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};
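/*
 * Re-parse mount options and apply whatever can be changed on a live
 * filesystem.  The original options are snapshotted up front so that any
 * failure can roll everything back via the restore_opts: path below.
 */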
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			 "dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & SB_LAZYTIME)
		sb->s_flags |= SB_LAZYTIME;

	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
						 "ext4_remount: Checksum for group %u failed (%u!=%u)",
						 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
						 le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
					 "remount RDWR because of unprocessed "
					 "orphan inode list.  Please "
					 "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if (!ext4_setup_super(sb, es, 0))
				sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY))
		ext4_commit_super(sb, 1);

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}

#ifdef CONFIG_QUOTA
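/*
 * Clamp the statfs block and inode counts to the project quota limits and
 * usage, so that a directory tree with a project quota reports the quota
 * as the filesystem size.
 */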
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
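/*
 * ->statfs() for ext4.  Free space is computed from the percpu cluster
 * counter, less dirty (delalloc-reserved) clusters; f_bavail additionally
 * subtracts the root-reserved and internally reserved blocks.
 */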
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}
#ifdef CONFIG_QUOTA

/*
 * Helper functions so that the transaction is started before we acquire
 * dqio_sem, keeping the correct lock ordering of transaction > dqio_sem.
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}
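/*
 * The dquot operations below all follow the same pattern: start a journal
 * handle sized for the quota update, call the generic dquot operation,
 * then stop the handle, returning the first error encountered.
 */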
static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}
static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}
/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
				    EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}
/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				 "Quota file not on filesystem root. "
				 "Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}
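/*
 * Enable one quota type whose quota file is a hidden system inode (the
 * "quota" feature), as recorded in the superblock's quota inode fields.
 */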
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	/* Restore the lockdep class before dropping our reference: on
	 * failure no one else holds the inode, so touching it after
	 * iput() would be a use after free. */
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}
/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				return err;
			}
		}
	}
	return 0;
}
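/*
 * Disable quota for one type.  For hidden system quota files only the
 * generic disable is done; for visible quota files the NOATIME/IMMUTABLE
 * flags added in ext4_quota_on() are cleared and the times updated.
 */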
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}
/*
 * Read data from the quota file - avoid the pagecache and such because we
 * cannot afford acquiring the locks...  As quota files are never truncated
 * and the quota code itself serializes the operations (and no one else
 * should touch the files), we don't have to be afraid of races.
 */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
/*
 * Write to the quota file (we know the transaction is already started and
 * has enough credits).
 */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account for only one data block in the transaction
	 * credits, the write must not cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}
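/*
 * ->get_next_id() for quotactl(Q_GETNEXTQUOTA): only usable when a quota
 * format implementing get_next_id is loaded for the requested type.
 */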
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops *ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif
static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
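/*
 * Module init: bring up the ext4 subsystems (extent status cache, page
 * I/O, block validity, sysfs, mballoc, inode cache) in dependency order,
 * then register the filesystem types.  The out* labels below unwind in
 * exact reverse order on failure.
 */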
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}
static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)