/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
					EXT4_SB(inode->i_sb)->s_journal,
					&EXT4_I(inode)->jinode,
					new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
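/*
 * Illustration (not from the original source): a short symlink whose
 * target is stored directly in i_data consumes no data blocks, so apart
 * from an optional xattr block (counted via i_file_acl in 512-byte units
 * above) its i_blocks is zero - exactly the "fast" case this helper
 * detects.
 */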
/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
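/*
 * A worked example (assuming a 4 KiB block size, so s_blocksize_bits == 12):
 * an inode with i_blocks == 8192 512-byte sectors spans 8192 >> 3 == 1024
 * filesystem blocks.  That is then bounded by the EXT4_MAX_TRANS_DATA
 * clamp above, so the handle is started with
 * EXT4_DATA_TRANS_BLOCKS(sb) + EXT4_MAX_TRANS_DATA credits rather than
 * with one credit per block.
 */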
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}
/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}
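/*
 * Illustrative calling pattern (a sketch, not a verbatim caller): the
 * truncate loop checks try_to_extend_transaction() before each chunk of
 * work; when it returns 1, the loop makes sure everything is consistently
 * dirtied against the current transaction and then calls
 * ext4_truncate_restart_trans() below to commit and carry on with a
 * fresh set of credits.
 */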
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data, ext4 uses a data structure
 * common to UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in the intermediate
 * nodes.  This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the
 * offset of the pointer to the (n+1)th node in the nth one.  If @i_block
 * is out of range (negative or too large), a warning is printed and zero
 * is returned.
 *
 * Note: this function doesn't find node addresses, so no IO is needed.
 * All we need to know is the capacity of indirect blocks (taken from
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
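/*
 * A worked example (assuming 4 KiB blocks, so ptrs == 1024 and
 * ptrs_bits == 10): logical block 5000 lies past the 12 direct slots and
 * the 1024 single-indirect slots, so after the two subtractions
 * i_block == 3964 and the path is
 *
 *	offsets[0] = EXT4_DIND_BLOCK	(double-indirect root)
 *	offsets[1] = 3964 >> 10   == 3
 *	offsets[2] = 3964 & 1023  == 892
 *
 * giving a depth of 3, with *boundary == 1023 - 892 == 131 blocks left
 * before the next indirect-block boundary.
 */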
static int __ext4_check_blockref(const char *function, struct inode *inode,
				 __le32 *p, unsigned int max)
{
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			__ext4_error(inode->i_sb, function,
				     "invalid block reference %u "
				     "in inode #%lu", blk, inode->i_ino);
			return -EIO;
		}
	}
	return 0;
}

#define ext4_check_indirect_blockref(inode, bh)				\
	__ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,	\
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)				\
	__ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,	\
			      EXT4_NDIR_BLOCKS)
/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * This function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK, or a pointer to the last filled triple (the
 * incomplete one) otherwise.  Upon return, chain[i].key contains the
 * number of the (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into bh->b_data for
 * i>0) and chain[i].bh points to the buffer_head of the i-th indirect
 * block for i>0 and NULL for i==0.  In other words, it holds the block
 * numbers of the chain, the addresses they were taken from (where we can
 * verify that the chain did not change) and the buffer_heads hosting
 * these numbers.
 *
 * The function stops when it stumbles upon a zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Needs to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
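/*
 * Continuing the example above (logical block 5000 under a double-indirect
 * root), a fully resolved chain would look like:
 *
 *	chain[0].p -> &EXT4_I(inode)->i_data[EXT4_DIND_BLOCK], .bh == NULL
 *	chain[1].p -> slot 3 inside the double-indirect block's bh->b_data
 *	chain[2].p -> slot 892 inside the indirect block's bh->b_data
 *
 * with chain[2].key holding the on-disk number of the data block itself.
 */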
/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * The rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near
 *     that block.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different
 * inode in the same block group.  The PID is used here so that
 * functionally related files will be close-by on-disk.
 *
 * The caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take the
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
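/*
 * For instance (illustrative numbers): with 32768 blocks per group and a
 * caller whose PID satisfies pid % 16 == 5, the colour offset is
 * 5 * (32768 / 16) == 10240 blocks into the group, so up to 16 concurrent
 * writers land in distinct sixteenths of the same block group.
 */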
/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}
/**
 * ext4_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly no blocks on that path have been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
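/*
 * A short example (illustrative): with k == 0, blks == 8,
 * blocks_to_boundary == 5 and every following map entry still zero, the
 * loop counts the first block plus the free slots up to the boundary and
 * returns 6 (blocks_to_boundary + 1) - a single pass never crosses into
 * the next indirect block.
 */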
/**
 * ext4_alloc_blocks - allocate multiple blocks needed for a branch
 * @indirect_blks: the number of blocks that need to be allocated for the
 *	indirect blocks
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block
 * @blks: on return it will store the total number of allocated
 *	direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
			EXT4_ERROR_INODE(inode,
					 "current_block %llu + count %lu > %d!",
					 current_block, count,
					 EXT4_MAX_BLOCK_FILE_PHYS);
			*err = -EIO;
			goto failed_out;
		}

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
		EXT4_ERROR_INODE(inode,
				 "current_block %llu + ar.len %d > %d!",
				 current_block, ar.len,
				 EXT4_MAX_BLOCK_FILE_PHYS);
		*err = -EIO;
		goto failed_out;
	}

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
	return ret;
}
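/*
 * In other words, ext4_alloc_blocks() works in two phases: metadata first
 * (ext4_new_meta_blocks() until every missing [td]indirect block has a
 * home, any surplus spilling into the first direct block), then data
 * (a single ext4_mb_new_blocks() call for the remaining blocks).  On
 * return, new_blocks[0..indirect_blks-1] hold the indirect blocks and
 * new_blocks[indirect_blks] the first direct block.
 */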
/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into a chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode.  It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do.  We are calling it after
 * we had read the existing part of the chain and partial points to the
 * last triple of that (one with zero ->key).  Upon exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place the chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
	for (i = 1; i <= n; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);

	return err;
}
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to the new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}
/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
  841. static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
  842. struct ext4_map_blocks *map,
  843. int flags)
  844. {
  845. int err = -EIO;
  846. ext4_lblk_t offsets[4];
  847. Indirect chain[4];
  848. Indirect *partial;
  849. ext4_fsblk_t goal;
  850. int indirect_blks;
  851. int blocks_to_boundary = 0;
  852. int depth;
  853. int count = 0;
  854. ext4_fsblk_t first_block = 0;
  855. J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
  856. J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
  857. depth = ext4_block_to_path(inode, map->m_lblk, offsets,
  858. &blocks_to_boundary);
  859. if (depth == 0)
  860. goto out;
  861. partial = ext4_get_branch(inode, depth, offsets, chain, &err);
  862. /* Simplest case - block found, no allocation needed */
  863. if (!partial) {
  864. first_block = le32_to_cpu(chain[depth - 1].key);
  865. count++;
  866. /*map more blocks*/
  867. while (count < map->m_len && count <= blocks_to_boundary) {
  868. ext4_fsblk_t blk;
  869. blk = le32_to_cpu(*(chain[depth-1].p + count));
  870. if (blk == first_block + count)
  871. count++;
  872. else
  873. break;
  874. }
  875. goto got_it;
  876. }
  877. /* Next simple case - plain lookup or failed read of indirect block */
  878. if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
  879. goto cleanup;
  880. /*
  881. * Okay, we need to do block allocation.
  882. */
  883. goal = ext4_find_goal(inode, map->m_lblk, partial);
884. /* the number of blocks we need to allocate for [d,t]indirect blocks */
  885. indirect_blks = (chain + depth) - partial - 1;
  886. /*
887. * Next look up the indirect map to count the total number of
  888. * direct blocks to allocate for this branch.
  889. */
  890. count = ext4_blks_to_allocate(partial, indirect_blks,
  891. map->m_len, blocks_to_boundary);
  892. /*
  893. * Block out ext4_truncate while we alter the tree
  894. */
  895. err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
  896. &count, goal,
  897. offsets + (partial - chain), partial);
  898. /*
  899. * The ext4_splice_branch call will free and forget any buffers
  900. * on the new chain if there is a failure, but that risks using
  901. * up transaction credits, especially for bitmaps where the
  902. * credits cannot be returned. Can we handle this somehow? We
  903. * may need to return -EAGAIN upwards in the worst case. --sct
  904. */
  905. if (!err)
  906. err = ext4_splice_branch(handle, inode, map->m_lblk,
  907. partial, indirect_blks, count);
  908. if (err)
  909. goto cleanup;
  910. map->m_flags |= EXT4_MAP_NEW;
  911. ext4_update_inode_fsync_trans(handle, inode, 1);
  912. got_it:
  913. map->m_flags |= EXT4_MAP_MAPPED;
  914. map->m_pblk = le32_to_cpu(chain[depth-1].key);
  915. map->m_len = count;
  916. if (count > blocks_to_boundary)
  917. map->m_flags |= EXT4_MAP_BOUNDARY;
  918. err = count;
  919. /* Clean up and exit */
  920. partial = chain + depth - 1; /* the whole chain */
  921. cleanup:
  922. while (partial > chain) {
  923. BUFFER_TRACE(partial->bh, "call brelse");
  924. brelse(partial->bh);
  925. partial--;
  926. }
  927. out:
  928. return err;
  929. }
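/*
 * Illustrative sketch (not part of this file): a lookup-only call that
 * follows the locking rule documented above.  The surrounding context
 * (inode, lblk) is hypothetical; the identifiers are the real ones
 * used in this file.
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret;
 *
 *	down_read(&EXT4_I(inode)->i_data_sem);
 *	ret = ext4_ind_map_blocks(NULL, inode, &map, 0);
 *	up_read(&EXT4_I(inode)->i_data_sem);
 *	if (ret > 0)
 *		map.m_pblk holds the first physical block, ret the count
 */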
  930. #ifdef CONFIG_QUOTA
  931. qsize_t *ext4_get_reserved_space(struct inode *inode)
  932. {
  933. return &EXT4_I(inode)->i_reserved_quota;
  934. }
  935. #endif
  936. /*
937. * Calculate the number of metadata blocks we need to reserve
938. * in order to allocate a new block at @lblock for a non-extent file
  939. */
  940. static int ext4_indirect_calc_metadata_amount(struct inode *inode,
  941. sector_t lblock)
  942. {
  943. struct ext4_inode_info *ei = EXT4_I(inode);
  944. sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
  945. int blk_bits;
  946. if (lblock < EXT4_NDIR_BLOCKS)
  947. return 0;
  948. lblock -= EXT4_NDIR_BLOCKS;
  949. if (ei->i_da_metadata_calc_len &&
  950. (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
  951. ei->i_da_metadata_calc_len++;
  952. return 0;
  953. }
  954. ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
  955. ei->i_da_metadata_calc_len = 1;
  956. blk_bits = order_base_2(lblock);
  957. return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
  958. }
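/*
 * Worked example (assuming a 4K block size, i.e. EXT4_ADDR_PER_BLOCK =
 * 1024 and EXT4_ADDR_PER_BLOCK_BITS = 10): for lblock = 20000 we get
 * 20000 - 12 = 19988, order_base_2(19988) = 15, and 15 / 10 + 1 = 2,
 * so one indirect plus one double-indirect block is reserved.  A
 * subsequent call for lblock = 20001 falls into the same cached
 * double-indirect window (lblock & dind_mask is unchanged) and
 * returns 0.
 */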
  959. /*
960. * Calculate the number of metadata blocks we need to reserve
961. * in order to allocate a block located at @lblock
  962. */
  963. static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
  964. {
  965. if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
  966. return ext4_ext_calc_metadata_amount(inode, lblock);
  967. return ext4_indirect_calc_metadata_amount(inode, lblock);
  968. }
  969. /*
  970. * Called with i_data_sem down, which is important since we can call
  971. * ext4_discard_preallocations() from here.
  972. */
  973. void ext4_da_update_reserve_space(struct inode *inode,
  974. int used, int quota_claim)
  975. {
  976. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  977. struct ext4_inode_info *ei = EXT4_I(inode);
  978. spin_lock(&ei->i_block_reservation_lock);
  979. trace_ext4_da_update_reserve_space(inode, used);
  980. if (unlikely(used > ei->i_reserved_data_blocks)) {
  981. ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
  982. "with only %d reserved data blocks\n",
  983. __func__, inode->i_ino, used,
  984. ei->i_reserved_data_blocks);
  985. WARN_ON(1);
  986. used = ei->i_reserved_data_blocks;
  987. }
  988. /* Update per-inode reservations */
  989. ei->i_reserved_data_blocks -= used;
  990. ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
  991. percpu_counter_sub(&sbi->s_dirtyblocks_counter,
  992. used + ei->i_allocated_meta_blocks);
  993. ei->i_allocated_meta_blocks = 0;
  994. if (ei->i_reserved_data_blocks == 0) {
  995. /*
  996. * We can release all of the reserved metadata blocks
  997. * only when we have written all of the delayed
  998. * allocation blocks.
  999. */
  1000. percpu_counter_sub(&sbi->s_dirtyblocks_counter,
  1001. ei->i_reserved_meta_blocks);
  1002. ei->i_reserved_meta_blocks = 0;
  1003. ei->i_da_metadata_calc_len = 0;
  1004. }
  1005. spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
  1006. /* Update quota subsystem for data blocks */
  1007. if (quota_claim)
  1008. dquot_claim_block(inode, used);
  1009. else {
  1010. /*
  1011. * We did fallocate with an offset that is already delayed
  1012. * allocated. So on delayed allocated writeback we should
  1013. * not re-claim the quota for fallocated blocks.
  1014. */
  1015. dquot_release_reservation_block(inode, used);
  1016. }
  1017. /*
  1018. * If we have done all the pending block allocations and if
  1019. * there aren't any writers on the inode, we can discard the
  1020. * inode's preallocations.
  1021. */
  1022. if ((ei->i_reserved_data_blocks == 0) &&
  1023. (atomic_read(&inode->i_writecount) == 0))
  1024. ext4_discard_preallocations(inode);
  1025. }
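/*
 * Accounting sketch with illustrative numbers: suppose 8 delalloc data
 * blocks were reserved along with 2 estimated metadata blocks, and
 * writeback now allocates used = 8 with quota_claim = 1.  Then
 * i_reserved_data_blocks drops from 8 to 0, the metadata actually
 * allocated is subtracted from s_dirtyblocks_counter, and because no
 * data reservation remains, the leftover i_reserved_meta_blocks are
 * released too; finally dquot_claim_block() converts the 8 reserved
 * quota blocks into used ones.
 */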
  1026. static int check_block_validity(struct inode *inode, const char *msg,
  1027. sector_t logical, sector_t phys, int len)
  1028. {
  1029. if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
  1030. __ext4_error(inode->i_sb, msg,
  1031. "inode #%lu logical block %llu mapped to %llu "
  1032. "(size %d)", inode->i_ino,
  1033. (unsigned long long) logical,
  1034. (unsigned long long) phys, len);
  1035. return -EIO;
  1036. }
  1037. return 0;
  1038. }
  1039. /*
  1040. * Return the number of contiguous dirty pages in a given inode
  1041. * starting at page frame idx.
  1042. */
  1043. static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
  1044. unsigned int max_pages)
  1045. {
  1046. struct address_space *mapping = inode->i_mapping;
  1047. pgoff_t index;
  1048. struct pagevec pvec;
  1049. pgoff_t num = 0;
  1050. int i, nr_pages, done = 0;
  1051. if (max_pages == 0)
  1052. return 0;
  1053. pagevec_init(&pvec, 0);
  1054. while (!done) {
  1055. index = idx;
  1056. nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
  1057. PAGECACHE_TAG_DIRTY,
  1058. (pgoff_t)PAGEVEC_SIZE);
  1059. if (nr_pages == 0)
  1060. break;
  1061. for (i = 0; i < nr_pages; i++) {
  1062. struct page *page = pvec.pages[i];
  1063. struct buffer_head *bh, *head;
  1064. lock_page(page);
  1065. if (unlikely(page->mapping != mapping) ||
  1066. !PageDirty(page) ||
  1067. PageWriteback(page) ||
  1068. page->index != idx) {
  1069. done = 1;
  1070. unlock_page(page);
  1071. break;
  1072. }
  1073. if (page_has_buffers(page)) {
  1074. bh = head = page_buffers(page);
  1075. do {
  1076. if (!buffer_delay(bh) &&
  1077. !buffer_unwritten(bh))
  1078. done = 1;
  1079. bh = bh->b_this_page;
  1080. } while (!done && (bh != head));
  1081. }
  1082. unlock_page(page);
  1083. if (done)
  1084. break;
  1085. idx++;
  1086. num++;
  1087. if (num >= max_pages)
  1088. break;
  1089. }
  1090. pagevec_release(&pvec);
  1091. }
  1092. return num;
  1093. }
  1094. /*
  1095. * The ext4_map_blocks() function tries to look up the requested blocks,
  1096. * and returns if the blocks are already mapped.
  1097. *
1098. * Otherwise it takes the write lock of i_data_sem, allocates blocks,
1099. * stores the allocated blocks in the result buffer head, and marks it
1100. * mapped.
1101. *
1102. * If the file is extent-based, it calls ext4_ext_map_blocks();
1103. * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
1104. * files.
1105. *
1106. * On success, it returns the number of blocks mapped or allocated.
1107. * If create == 0 and the blocks are pre-allocated and uninitialized,
1108. * the result buffer head is unmapped. If create == 1, it will make sure
1109. * the buffer head is mapped.
1110. *
1111. * It returns 0 if a plain lookup failed (blocks have not been allocated);
1112. * in that case, the buffer head is unmapped.
  1113. *
  1114. * It returns the error in case of allocation failure.
  1115. */
  1116. int ext4_map_blocks(handle_t *handle, struct inode *inode,
  1117. struct ext4_map_blocks *map, int flags)
  1118. {
  1119. int retval;
  1120. map->m_flags = 0;
  1121. ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
  1122. "logical block %lu\n", inode->i_ino, flags, map->m_len,
  1123. (unsigned long) map->m_lblk);
  1124. /*
  1125. * Try to see if we can get the block without requesting a new
  1126. * file system block.
  1127. */
  1128. down_read((&EXT4_I(inode)->i_data_sem));
  1129. if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
  1130. retval = ext4_ext_map_blocks(handle, inode, map, 0);
  1131. } else {
  1132. retval = ext4_ind_map_blocks(handle, inode, map, 0);
  1133. }
  1134. up_read((&EXT4_I(inode)->i_data_sem));
  1135. if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
  1136. int ret = check_block_validity(inode, "file system corruption",
  1137. map->m_lblk, map->m_pblk, retval);
  1138. if (ret != 0)
  1139. return ret;
  1140. }
  1141. /* If it is only a block(s) look up */
  1142. if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
  1143. return retval;
  1144. /*
1145. * Return if the blocks have already been allocated.
1146. *
1147. * Note that if blocks have been preallocated,
1148. * ext4_ext_map_blocks() called with create == 0 returns
1149. * with the buffer head unmapped.
  1150. */
  1151. if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
  1152. return retval;
  1153. /*
  1154. * When we call get_blocks without the create flag, the
  1155. * BH_Unwritten flag could have gotten set if the blocks
1156. * requested were part of an uninitialized extent. We need to
  1157. * clear this flag now that we are committed to convert all or
  1158. * part of the uninitialized extent to be an initialized
  1159. * extent. This is because we need to avoid the combination
  1160. * of BH_Unwritten and BH_Mapped flags being simultaneously
  1161. * set on the buffer_head.
  1162. */
  1163. map->m_flags &= ~EXT4_MAP_UNWRITTEN;
  1164. /*
1165. * New block allocation and/or writing to an uninitialized extent
  1166. * will possibly result in updating i_data, so we take
  1167. * the write lock of i_data_sem, and call get_blocks()
  1168. * with create == 1 flag.
  1169. */
  1170. down_write((&EXT4_I(inode)->i_data_sem));
  1171. /*
1172. * If the caller is from the delayed allocation writeout path,
1173. * we have already reserved fs blocks for allocation;
1174. * let the underlying get_block() function know, so it can
1175. * avoid double accounting
  1176. */
  1177. if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
  1178. EXT4_I(inode)->i_delalloc_reserved_flag = 1;
  1179. /*
  1180. * We need to check for EXT4 here because migrate
  1181. * could have changed the inode type in between
  1182. */
  1183. if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
  1184. retval = ext4_ext_map_blocks(handle, inode, map, flags);
  1185. } else {
  1186. retval = ext4_ind_map_blocks(handle, inode, map, flags);
  1187. if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
  1188. /*
  1189. * We allocated new blocks which will result in
  1190. * i_data's format changing. Force the migrate
  1191. * to fail by clearing migrate flags
  1192. */
  1193. ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
  1194. }
  1195. /*
  1196. * Update reserved blocks/metadata blocks after successful
  1197. * block allocation which had been deferred till now. We don't
  1198. * support fallocate for non extent files. So we can update
  1199. * reserve space here.
  1200. */
  1201. if ((retval > 0) &&
  1202. (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
  1203. ext4_da_update_reserve_space(inode, retval, 1);
  1204. }
  1205. if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
  1206. EXT4_I(inode)->i_delalloc_reserved_flag = 0;
  1207. up_write((&EXT4_I(inode)->i_data_sem));
  1208. if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
  1209. int ret = check_block_validity(inode, "file system "
  1210. "corruption after allocation",
  1211. map->m_lblk, map->m_pblk,
  1212. retval);
  1213. if (ret != 0)
  1214. return ret;
  1215. }
  1216. return retval;
  1217. }
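/*
 * Usage sketch (illustrative, not part of this file): allocating up to
 * one block under a running transaction started elsewhere with
 * ext4_journal_start(); error handling is elided.
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret;
 *
 *	ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_NEW))
 *		map.m_pblk is the freshly allocated physical block
 */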
  1218. int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
  1219. unsigned int max_blocks, struct buffer_head *bh,
  1220. int flags)
  1221. {
  1222. struct ext4_map_blocks map;
  1223. int ret;
  1224. map.m_lblk = block;
  1225. map.m_len = max_blocks;
  1226. ret = ext4_map_blocks(handle, inode, &map, flags);
  1227. if (ret < 0)
  1228. return ret;
  1229. bh->b_blocknr = map.m_pblk;
  1230. bh->b_size = inode->i_sb->s_blocksize * map.m_len;
  1231. bh->b_bdev = inode->i_sb->s_bdev;
  1232. bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
  1233. return ret;
  1234. }
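/*
 * Sketch of the wrapper in action (illustrative): callers that still
 * think in buffer_heads get the mapping copied back into the bh.
 *
 *	struct buffer_head bh = { .b_state = 0 };
 *	int ret = ext4_get_blocks(handle, inode, lblk, 4, &bh,
 *				  EXT4_GET_BLOCKS_CREATE);
 *	if (ret > 0)
 *		bh.b_blocknr is the first mapped block, bh.b_size the
 *		mapped length in bytes, and buffer_new(&bh) reports
 *		whether the range was freshly allocated
 */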
  1235. /* Maximum number of blocks we map for direct IO at once. */
  1236. #define DIO_MAX_BLOCKS 4096
  1237. int ext4_get_block(struct inode *inode, sector_t iblock,
  1238. struct buffer_head *bh_result, int create)
  1239. {
  1240. handle_t *handle = ext4_journal_current_handle();
  1241. int ret = 0, started = 0;
  1242. unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
  1243. int dio_credits;
  1244. if (create && !handle) {
  1245. /* Direct IO write... */
  1246. if (max_blocks > DIO_MAX_BLOCKS)
  1247. max_blocks = DIO_MAX_BLOCKS;
  1248. dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
  1249. handle = ext4_journal_start(inode, dio_credits);
  1250. if (IS_ERR(handle)) {
  1251. ret = PTR_ERR(handle);
  1252. goto out;
  1253. }
  1254. started = 1;
  1255. }
  1256. ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
  1257. create ? EXT4_GET_BLOCKS_CREATE : 0);
  1258. if (ret > 0) {
  1259. bh_result->b_size = (ret << inode->i_blkbits);
  1260. ret = 0;
  1261. }
  1262. if (started)
  1263. ext4_journal_stop(handle);
  1264. out:
  1265. return ret;
  1266. }
  1267. /*
  1268. * `handle' can be NULL if create is zero
  1269. */
  1270. struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
  1271. ext4_lblk_t block, int create, int *errp)
  1272. {
  1273. struct buffer_head dummy;
  1274. int fatal = 0, err;
  1275. int flags = 0;
  1276. J_ASSERT(handle != NULL || create == 0);
  1277. dummy.b_state = 0;
  1278. dummy.b_blocknr = -1000;
  1279. buffer_trace_init(&dummy.b_history);
  1280. if (create)
  1281. flags |= EXT4_GET_BLOCKS_CREATE;
  1282. err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
  1283. /*
1284. * ext4_get_blocks() returns the number of blocks mapped; 0 in
1285. * the case of a HOLE.
  1286. */
  1287. if (err > 0) {
  1288. if (err > 1)
  1289. WARN_ON(1);
  1290. err = 0;
  1291. }
  1292. *errp = err;
  1293. if (!err && buffer_mapped(&dummy)) {
  1294. struct buffer_head *bh;
  1295. bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
  1296. if (!bh) {
  1297. *errp = -EIO;
  1298. goto err;
  1299. }
  1300. if (buffer_new(&dummy)) {
  1301. J_ASSERT(create != 0);
  1302. J_ASSERT(handle != NULL);
  1303. /*
  1304. * Now that we do not always journal data, we should
  1305. * keep in mind whether this should always journal the
  1306. * new buffer as metadata. For now, regular file
  1307. * writes use ext4_get_block instead, so it's not a
  1308. * problem.
  1309. */
  1310. lock_buffer(bh);
  1311. BUFFER_TRACE(bh, "call get_create_access");
  1312. fatal = ext4_journal_get_create_access(handle, bh);
  1313. if (!fatal && !buffer_uptodate(bh)) {
  1314. memset(bh->b_data, 0, inode->i_sb->s_blocksize);
  1315. set_buffer_uptodate(bh);
  1316. }
  1317. unlock_buffer(bh);
  1318. BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
  1319. err = ext4_handle_dirty_metadata(handle, inode, bh);
  1320. if (!fatal)
  1321. fatal = err;
  1322. } else {
  1323. BUFFER_TRACE(bh, "not a new buffer");
  1324. }
  1325. if (fatal) {
  1326. *errp = fatal;
  1327. brelse(bh);
  1328. bh = NULL;
  1329. }
  1330. return bh;
  1331. }
  1332. err:
  1333. return NULL;
  1334. }
  1335. struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
  1336. ext4_lblk_t block, int create, int *err)
  1337. {
  1338. struct buffer_head *bh;
  1339. bh = ext4_getblk(handle, inode, block, create, err);
  1340. if (!bh)
  1341. return bh;
  1342. if (buffer_uptodate(bh))
  1343. return bh;
  1344. ll_rw_block(READ_META, 1, &bh);
  1345. wait_on_buffer(bh);
  1346. if (buffer_uptodate(bh))
  1347. return bh;
  1348. put_bh(bh);
  1349. *err = -EIO;
  1350. return NULL;
  1351. }
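/*
 * Usage sketch (illustrative): reading logical block 0 of a directory,
 * as the directory code does; no transaction is needed for a read.
 * Note that a NULL return with *err == 0 means a hole.
 *
 *	int err;
 *	struct buffer_head *bh = ext4_bread(NULL, dir, 0, 0, &err);
 *	if (!bh)
 *		return err;
 *	... use bh->b_data ...
 *	brelse(bh);
 */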
  1352. static int walk_page_buffers(handle_t *handle,
  1353. struct buffer_head *head,
  1354. unsigned from,
  1355. unsigned to,
  1356. int *partial,
  1357. int (*fn)(handle_t *handle,
  1358. struct buffer_head *bh))
  1359. {
  1360. struct buffer_head *bh;
  1361. unsigned block_start, block_end;
  1362. unsigned blocksize = head->b_size;
  1363. int err, ret = 0;
  1364. struct buffer_head *next;
  1365. for (bh = head, block_start = 0;
  1366. ret == 0 && (bh != head || !block_start);
  1367. block_start = block_end, bh = next) {
  1368. next = bh->b_this_page;
  1369. block_end = block_start + blocksize;
  1370. if (block_end <= from || block_start >= to) {
  1371. if (partial && !buffer_uptodate(bh))
  1372. *partial = 1;
  1373. continue;
  1374. }
  1375. err = (*fn)(handle, bh);
  1376. if (!ret)
  1377. ret = err;
  1378. }
  1379. return ret;
  1380. }
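/*
 * Illustrative use (as done later in this file): apply a journaling
 * callback to every buffer that intersects the written range.  Buffers
 * fully outside [from, to) are skipped and only update *partial when
 * they are not uptodate.
 *
 *	ret = walk_page_buffers(handle, page_buffers(page), from, to,
 *				NULL, do_journal_get_write_access);
 */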
  1381. /*
  1382. * To preserve ordering, it is essential that the hole instantiation and
  1383. * the data write be encapsulated in a single transaction. We cannot
  1384. * close off a transaction and start a new one between the ext4_get_block()
  1385. * and the commit_write(). So doing the jbd2_journal_start at the start of
  1386. * prepare_write() is the right place.
  1387. *
  1388. * Also, this function can nest inside ext4_writepage() ->
  1389. * block_write_full_page(). In that case, we *know* that ext4_writepage()
  1390. * has generated enough buffer credits to do the whole page. So we won't
  1391. * block on the journal in that case, which is good, because the caller may
  1392. * be PF_MEMALLOC.
  1393. *
  1394. * By accident, ext4 can be reentered when a transaction is open via
  1395. * quota file writes. If we were to commit the transaction while thus
  1396. * reentered, there can be a deadlock - we would be holding a quota
  1397. * lock, and the commit would never complete if another thread had a
  1398. * transaction open and was blocking on the quota lock - a ranking
  1399. * violation.
  1400. *
  1401. * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
  1402. * will _not_ run commit under these circumstances because handle->h_ref
  1403. * is elevated. We'll still have enough credits for the tiny quotafile
  1404. * write.
  1405. */
  1406. static int do_journal_get_write_access(handle_t *handle,
  1407. struct buffer_head *bh)
  1408. {
  1409. if (!buffer_mapped(bh) || buffer_freed(bh))
  1410. return 0;
  1411. return ext4_journal_get_write_access(handle, bh);
  1412. }
  1413. /*
  1414. * Truncate blocks that were not used by write. We have to truncate the
  1415. * pagecache as well so that corresponding buffers get properly unmapped.
  1416. */
  1417. static void ext4_truncate_failed_write(struct inode *inode)
  1418. {
  1419. truncate_inode_pages(inode->i_mapping, inode->i_size);
  1420. ext4_truncate(inode);
  1421. }
  1422. static int ext4_get_block_write(struct inode *inode, sector_t iblock,
  1423. struct buffer_head *bh_result, int create);
  1424. static int ext4_write_begin(struct file *file, struct address_space *mapping,
  1425. loff_t pos, unsigned len, unsigned flags,
  1426. struct page **pagep, void **fsdata)
  1427. {
  1428. struct inode *inode = mapping->host;
  1429. int ret, needed_blocks;
  1430. handle_t *handle;
  1431. int retries = 0;
  1432. struct page *page;
  1433. pgoff_t index;
  1434. unsigned from, to;
  1435. trace_ext4_write_begin(inode, pos, len, flags);
  1436. /*
  1437. * Reserve one block more for addition to orphan list in case
1438. * we allocate blocks but the write fails for some reason
  1439. */
  1440. needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
  1441. index = pos >> PAGE_CACHE_SHIFT;
  1442. from = pos & (PAGE_CACHE_SIZE - 1);
  1443. to = from + len;
  1444. retry:
  1445. handle = ext4_journal_start(inode, needed_blocks);
  1446. if (IS_ERR(handle)) {
  1447. ret = PTR_ERR(handle);
  1448. goto out;
  1449. }
  1450. /* We cannot recurse into the filesystem as the transaction is already
  1451. * started */
  1452. flags |= AOP_FLAG_NOFS;
  1453. page = grab_cache_page_write_begin(mapping, index, flags);
  1454. if (!page) {
  1455. ext4_journal_stop(handle);
  1456. ret = -ENOMEM;
  1457. goto out;
  1458. }
  1459. *pagep = page;
  1460. if (ext4_should_dioread_nolock(inode))
  1461. ret = block_write_begin(file, mapping, pos, len, flags, pagep,
  1462. fsdata, ext4_get_block_write);
  1463. else
  1464. ret = block_write_begin(file, mapping, pos, len, flags, pagep,
  1465. fsdata, ext4_get_block);
  1466. if (!ret && ext4_should_journal_data(inode)) {
  1467. ret = walk_page_buffers(handle, page_buffers(page),
  1468. from, to, NULL, do_journal_get_write_access);
  1469. }
  1470. if (ret) {
  1471. unlock_page(page);
  1472. page_cache_release(page);
  1473. /*
  1474. * block_write_begin may have instantiated a few blocks
  1475. * outside i_size. Trim these off again. Don't need
  1476. * i_size_read because we hold i_mutex.
  1477. *
  1478. * Add inode to orphan list in case we crash before
  1479. * truncate finishes
  1480. */
  1481. if (pos + len > inode->i_size && ext4_can_truncate(inode))
  1482. ext4_orphan_add(handle, inode);
  1483. ext4_journal_stop(handle);
  1484. if (pos + len > inode->i_size) {
  1485. ext4_truncate_failed_write(inode);
  1486. /*
  1487. * If truncate failed early the inode might
  1488. * still be on the orphan list; we need to
  1489. * make sure the inode is removed from the
  1490. * orphan list in that case.
  1491. */
  1492. if (inode->i_nlink)
  1493. ext4_orphan_del(NULL, inode);
  1494. }
  1495. }
  1496. if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
  1497. goto retry;
  1498. out:
  1499. return ret;
  1500. }
  1501. /* For write_end() in data=journal mode */
  1502. static int write_end_fn(handle_t *handle, struct buffer_head *bh)
  1503. {
  1504. if (!buffer_mapped(bh) || buffer_freed(bh))
  1505. return 0;
  1506. set_buffer_uptodate(bh);
  1507. return ext4_handle_dirty_metadata(handle, NULL, bh);
  1508. }
  1509. static int ext4_generic_write_end(struct file *file,
  1510. struct address_space *mapping,
  1511. loff_t pos, unsigned len, unsigned copied,
  1512. struct page *page, void *fsdata)
  1513. {
  1514. int i_size_changed = 0;
  1515. struct inode *inode = mapping->host;
  1516. handle_t *handle = ext4_journal_current_handle();
  1517. copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
  1518. /*
  1519. * No need to use i_size_read() here, the i_size
  1520. * cannot change under us because we hold i_mutex.
  1521. *
  1522. * But it's important to update i_size while still holding page lock:
  1523. * page writeout could otherwise come in and zero beyond i_size.
  1524. */
  1525. if (pos + copied > inode->i_size) {
  1526. i_size_write(inode, pos + copied);
  1527. i_size_changed = 1;
  1528. }
  1529. if (pos + copied > EXT4_I(inode)->i_disksize) {
1530. /* We need to mark the inode dirty even if
1531. * new_i_size is less than inode->i_size
1532. * but greater than i_disksize (hint: delalloc).
  1533. */
  1534. ext4_update_i_disksize(inode, (pos + copied));
  1535. i_size_changed = 1;
  1536. }
  1537. unlock_page(page);
  1538. page_cache_release(page);
  1539. /*
  1540. * Don't mark the inode dirty under page lock. First, it unnecessarily
  1541. * makes the holding time of page lock longer. Second, it forces lock
  1542. * ordering of page lock and transaction start for journaling
  1543. * filesystems.
  1544. */
  1545. if (i_size_changed)
  1546. ext4_mark_inode_dirty(handle, inode);
  1547. return copied;
  1548. }
  1549. /*
1550. * We need to pick up the new inode size which generic_commit_write gave us.
  1551. * `file' can be NULL - eg, when called from page_symlink().
  1552. *
  1553. * ext4 never places buffers on inode->i_mapping->private_list. metadata
  1554. * buffers are managed internally.
  1555. */
  1556. static int ext4_ordered_write_end(struct file *file,
  1557. struct address_space *mapping,
  1558. loff_t pos, unsigned len, unsigned copied,
  1559. struct page *page, void *fsdata)
  1560. {
  1561. handle_t *handle = ext4_journal_current_handle();
  1562. struct inode *inode = mapping->host;
  1563. int ret = 0, ret2;
  1564. trace_ext4_ordered_write_end(inode, pos, len, copied);
  1565. ret = ext4_jbd2_file_inode(handle, inode);
  1566. if (ret == 0) {
  1567. ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
  1568. page, fsdata);
  1569. copied = ret2;
  1570. if (pos + len > inode->i_size && ext4_can_truncate(inode))
1571. /* If we have allocated more blocks and copied
1572. * less, we will have blocks allocated outside
1573. * inode->i_size; truncate them.
  1574. */
  1575. ext4_orphan_add(handle, inode);
  1576. if (ret2 < 0)
  1577. ret = ret2;
  1578. }
  1579. ret2 = ext4_journal_stop(handle);
  1580. if (!ret)
  1581. ret = ret2;
  1582. if (pos + len > inode->i_size) {
  1583. ext4_truncate_failed_write(inode);
  1584. /*
  1585. * If truncate failed early the inode might still be
  1586. * on the orphan list; we need to make sure the inode
  1587. * is removed from the orphan list in that case.
  1588. */
  1589. if (inode->i_nlink)
  1590. ext4_orphan_del(NULL, inode);
  1591. }
  1592. return ret ? ret : copied;
  1593. }
  1594. static int ext4_writeback_write_end(struct file *file,
  1595. struct address_space *mapping,
  1596. loff_t pos, unsigned len, unsigned copied,
  1597. struct page *page, void *fsdata)
  1598. {
  1599. handle_t *handle = ext4_journal_current_handle();
  1600. struct inode *inode = mapping->host;
  1601. int ret = 0, ret2;
  1602. trace_ext4_writeback_write_end(inode, pos, len, copied);
  1603. ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
  1604. page, fsdata);
  1605. copied = ret2;
  1606. if (pos + len > inode->i_size && ext4_can_truncate(inode))
1607. /* If we have allocated more blocks and copied
1608. * less, we will have blocks allocated outside
1609. * inode->i_size; truncate them.
  1610. */
  1611. ext4_orphan_add(handle, inode);
  1612. if (ret2 < 0)
  1613. ret = ret2;
  1614. ret2 = ext4_journal_stop(handle);
  1615. if (!ret)
  1616. ret = ret2;
  1617. if (pos + len > inode->i_size) {
  1618. ext4_truncate_failed_write(inode);
  1619. /*
  1620. * If truncate failed early the inode might still be
  1621. * on the orphan list; we need to make sure the inode
  1622. * is removed from the orphan list in that case.
  1623. */
  1624. if (inode->i_nlink)
  1625. ext4_orphan_del(NULL, inode);
  1626. }
  1627. return ret ? ret : copied;
  1628. }
  1629. static int ext4_journalled_write_end(struct file *file,
  1630. struct address_space *mapping,
  1631. loff_t pos, unsigned len, unsigned copied,
  1632. struct page *page, void *fsdata)
  1633. {
  1634. handle_t *handle = ext4_journal_current_handle();
  1635. struct inode *inode = mapping->host;
  1636. int ret = 0, ret2;
  1637. int partial = 0;
  1638. unsigned from, to;
  1639. loff_t new_i_size;
  1640. trace_ext4_journalled_write_end(inode, pos, len, copied);
  1641. from = pos & (PAGE_CACHE_SIZE - 1);
  1642. to = from + len;
  1643. if (copied < len) {
  1644. if (!PageUptodate(page))
  1645. copied = 0;
  1646. page_zero_new_buffers(page, from+copied, to);
  1647. }
  1648. ret = walk_page_buffers(handle, page_buffers(page), from,
  1649. to, &partial, write_end_fn);
  1650. if (!partial)
  1651. SetPageUptodate(page);
  1652. new_i_size = pos + copied;
  1653. if (new_i_size > inode->i_size)
  1654. i_size_write(inode, pos+copied);
  1655. ext4_set_inode_state(inode, EXT4_STATE_JDATA);
  1656. if (new_i_size > EXT4_I(inode)->i_disksize) {
  1657. ext4_update_i_disksize(inode, new_i_size);
  1658. ret2 = ext4_mark_inode_dirty(handle, inode);
  1659. if (!ret)
  1660. ret = ret2;
  1661. }
  1662. unlock_page(page);
  1663. page_cache_release(page);
  1664. if (pos + len > inode->i_size && ext4_can_truncate(inode))
1665. /* If we have allocated more blocks and copied
1666. * less, we will have blocks allocated outside
1667. * inode->i_size; truncate them.
  1668. */
  1669. ext4_orphan_add(handle, inode);
  1670. ret2 = ext4_journal_stop(handle);
  1671. if (!ret)
  1672. ret = ret2;
  1673. if (pos + len > inode->i_size) {
  1674. ext4_truncate_failed_write(inode);
  1675. /*
  1676. * If truncate failed early the inode might still be
  1677. * on the orphan list; we need to make sure the inode
  1678. * is removed from the orphan list in that case.
  1679. */
  1680. if (inode->i_nlink)
  1681. ext4_orphan_del(NULL, inode);
  1682. }
  1683. return ret ? ret : copied;
  1684. }
  1685. /*
  1686. * Reserve a single block located at lblock
  1687. */
  1688. static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
  1689. {
  1690. int retries = 0;
  1691. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  1692. struct ext4_inode_info *ei = EXT4_I(inode);
  1693. unsigned long md_needed;
  1694. int ret;
  1695. /*
1696. * Recalculate the amount of metadata blocks to reserve
1697. * in order to allocate nrblocks;
1698. * the worst case is one extent per block.
  1699. */
  1700. repeat:
  1701. spin_lock(&ei->i_block_reservation_lock);
  1702. md_needed = ext4_calc_metadata_amount(inode, lblock);
  1703. trace_ext4_da_reserve_space(inode, md_needed);
  1704. spin_unlock(&ei->i_block_reservation_lock);
  1705. /*
  1706. * We will charge metadata quota at writeout time; this saves
  1707. * us from metadata over-estimation, though we may go over by
  1708. * a small amount in the end. Here we just reserve for data.
  1709. */
  1710. ret = dquot_reserve_block(inode, 1);
  1711. if (ret)
  1712. return ret;
  1713. /*
  1714. * We do still charge estimated metadata to the sb though;
  1715. * we cannot afford to run out of free blocks.
  1716. */
  1717. if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
  1718. dquot_release_reservation_block(inode, 1);
  1719. if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
  1720. yield();
  1721. goto repeat;
  1722. }
  1723. return -ENOSPC;
  1724. }
  1725. spin_lock(&ei->i_block_reservation_lock);
  1726. ei->i_reserved_data_blocks++;
  1727. ei->i_reserved_meta_blocks += md_needed;
  1728. spin_unlock(&ei->i_block_reservation_lock);
  1729. return 0; /* success */
  1730. }
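/*
 * Pairing sketch (illustrative): the delalloc get_block path reserves
 * one data block per delayed buffer, and invalidatepage hands the
 * reservation back if the page is discarded before writeback.
 *
 *	ret = ext4_da_reserve_space(inode, iblock);     (write_begin side)
 *	if (ret)
 *		return ret;
 *	...
 *	ext4_da_release_space(inode, to_release);       (invalidate side)
 */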
  1731. static void ext4_da_release_space(struct inode *inode, int to_free)
  1732. {
  1733. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  1734. struct ext4_inode_info *ei = EXT4_I(inode);
  1735. if (!to_free)
  1736. return; /* Nothing to release, exit */
  1737. spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
  1738. if (unlikely(to_free > ei->i_reserved_data_blocks)) {
  1739. /*
  1740. * if there aren't enough reserved blocks, then the
  1741. * counter is messed up somewhere. Since this
1742. * function is called from invalidatepage, it's
  1743. * harmless to return without any action.
  1744. */
  1745. ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
  1746. "ino %lu, to_free %d with only %d reserved "
  1747. "data blocks\n", inode->i_ino, to_free,
  1748. ei->i_reserved_data_blocks);
  1749. WARN_ON(1);
  1750. to_free = ei->i_reserved_data_blocks;
  1751. }
  1752. ei->i_reserved_data_blocks -= to_free;
  1753. if (ei->i_reserved_data_blocks == 0) {
  1754. /*
  1755. * We can release all of the reserved metadata blocks
  1756. * only when we have written all of the delayed
  1757. * allocation blocks.
  1758. */
  1759. percpu_counter_sub(&sbi->s_dirtyblocks_counter,
  1760. ei->i_reserved_meta_blocks);
  1761. ei->i_reserved_meta_blocks = 0;
  1762. ei->i_da_metadata_calc_len = 0;
  1763. }
  1764. /* update fs dirty data blocks counter */
  1765. percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
  1766. spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
  1767. dquot_release_reservation_block(inode, to_free);
  1768. }
  1769. static void ext4_da_page_release_reservation(struct page *page,
  1770. unsigned long offset)
  1771. {
  1772. int to_release = 0;
  1773. struct buffer_head *head, *bh;
  1774. unsigned int curr_off = 0;
  1775. head = page_buffers(page);
  1776. bh = head;
  1777. do {
  1778. unsigned int next_off = curr_off + bh->b_size;
  1779. if ((offset <= curr_off) && (buffer_delay(bh))) {
  1780. to_release++;
  1781. clear_buffer_delay(bh);
  1782. }
  1783. curr_off = next_off;
  1784. } while ((bh = bh->b_this_page) != head);
  1785. ext4_da_release_space(page->mapping->host, to_release);
  1786. }
  1787. /*
  1788. * Delayed allocation stuff
  1789. */
  1790. /*
1791. * mpage_da_submit_io - walks through an extent of pages and tries to write
1792. * them with the writepage() callback
  1793. *
  1794. * @mpd->inode: inode
  1795. * @mpd->first_page: first page of the extent
  1796. * @mpd->next_page: page after the last page of the extent
  1797. *
  1798. * By the time mpage_da_submit_io() is called we expect all blocks
1799. * to be allocated; this may be wrong if allocation failed.
  1800. *
  1801. * As pages are already locked by write_cache_pages(), we can't use it
  1802. */
  1803. static int mpage_da_submit_io(struct mpage_da_data *mpd)
  1804. {
  1805. long pages_skipped;
  1806. struct pagevec pvec;
  1807. unsigned long index, end;
  1808. int ret = 0, err, nr_pages, i;
  1809. struct inode *inode = mpd->inode;
  1810. struct address_space *mapping = inode->i_mapping;
  1811. BUG_ON(mpd->next_page <= mpd->first_page);
  1812. /*
  1813. * We need to start from the first_page to the next_page - 1
  1814. * to make sure we also write the mapped dirty buffer_heads.
  1815. * If we look at mpd->b_blocknr we would only be looking
  1816. * at the currently mapped buffer_heads.
  1817. */
  1818. index = mpd->first_page;
  1819. end = mpd->next_page - 1;
  1820. pagevec_init(&pvec, 0);
  1821. while (index <= end) {
  1822. nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
  1823. if (nr_pages == 0)
  1824. break;
  1825. for (i = 0; i < nr_pages; i++) {
  1826. struct page *page = pvec.pages[i];
  1827. index = page->index;
  1828. if (index > end)
  1829. break;
  1830. index++;
  1831. BUG_ON(!PageLocked(page));
  1832. BUG_ON(PageWriteback(page));
  1833. pages_skipped = mpd->wbc->pages_skipped;
  1834. err = mapping->a_ops->writepage(page, mpd->wbc);
  1835. if (!err && (pages_skipped == mpd->wbc->pages_skipped))
  1836. /*
1837. * we have successfully written the page
1838. * without skipping it
  1839. */
  1840. mpd->pages_written++;
  1841. /*
  1842. * In error case, we have to continue because
  1843. * remaining pages are still locked
  1844. * XXX: unlock and re-dirty them?
  1845. */
  1846. if (ret == 0)
  1847. ret = err;
  1848. }
  1849. pagevec_release(&pvec);
  1850. }
  1851. return ret;
  1852. }
  1853. /*
  1854. * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
  1855. *
  1856. * @mpd->inode - inode to walk through
  1857. * @exbh->b_blocknr - first block on a disk
  1858. * @exbh->b_size - amount of space in bytes
  1859. * @logical - first logical block to start assignment with
  1860. *
1861. * the function goes through all passed space and puts actual disk
  1862. * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
  1863. */
  1864. static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
  1865. struct buffer_head *exbh)
  1866. {
  1867. struct inode *inode = mpd->inode;
  1868. struct address_space *mapping = inode->i_mapping;
  1869. int blocks = exbh->b_size >> inode->i_blkbits;
  1870. sector_t pblock = exbh->b_blocknr, cur_logical;
  1871. struct buffer_head *head, *bh;
  1872. pgoff_t index, end;
  1873. struct pagevec pvec;
  1874. int nr_pages, i;
  1875. index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
  1876. end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
  1877. cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
  1878. pagevec_init(&pvec, 0);
  1879. while (index <= end) {
  1880. /* XXX: optimize tail */
  1881. nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
  1882. if (nr_pages == 0)
  1883. break;
  1884. for (i = 0; i < nr_pages; i++) {
  1885. struct page *page = pvec.pages[i];
  1886. index = page->index;
  1887. if (index > end)
  1888. break;
  1889. index++;
  1890. BUG_ON(!PageLocked(page));
  1891. BUG_ON(PageWriteback(page));
  1892. BUG_ON(!page_has_buffers(page));
  1893. bh = page_buffers(page);
  1894. head = bh;
  1895. /* skip blocks out of the range */
  1896. do {
  1897. if (cur_logical >= logical)
  1898. break;
  1899. cur_logical++;
  1900. } while ((bh = bh->b_this_page) != head);
  1901. do {
  1902. if (cur_logical >= logical + blocks)
  1903. break;
  1904. if (buffer_delay(bh) ||
  1905. buffer_unwritten(bh)) {
  1906. BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
  1907. if (buffer_delay(bh)) {
  1908. clear_buffer_delay(bh);
  1909. bh->b_blocknr = pblock;
  1910. } else {
  1911. /*
1912. * An unwritten buffer should already have
1913. * a blocknr assigned; verify that.
  1914. */
  1915. clear_buffer_unwritten(bh);
  1916. BUG_ON(bh->b_blocknr != pblock);
  1917. }
  1918. } else if (buffer_mapped(bh))
  1919. BUG_ON(bh->b_blocknr != pblock);
  1920. if (buffer_uninit(exbh))
  1921. set_buffer_uninit(bh);
  1922. cur_logical++;
  1923. pblock++;
  1924. } while ((bh = bh->b_this_page) != head);
  1925. }
  1926. pagevec_release(&pvec);
  1927. }
  1928. }
  1929. /*
  1930. * __unmap_underlying_blocks - just a helper function to unmap
  1931. * set of blocks described by @bh
  1932. */
  1933. static inline void __unmap_underlying_blocks(struct inode *inode,
  1934. struct buffer_head *bh)
  1935. {
  1936. struct block_device *bdev = inode->i_sb->s_bdev;
  1937. int blocks, i;
  1938. blocks = bh->b_size >> inode->i_blkbits;
  1939. for (i = 0; i < blocks; i++)
  1940. unmap_underlying_metadata(bdev, bh->b_blocknr + i);
  1941. }
  1942. static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
  1943. sector_t logical, long blk_cnt)
  1944. {
  1945. int nr_pages, i;
  1946. pgoff_t index, end;
  1947. struct pagevec pvec;
  1948. struct inode *inode = mpd->inode;
  1949. struct address_space *mapping = inode->i_mapping;
  1950. index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
  1951. end = (logical + blk_cnt - 1) >>
  1952. (PAGE_CACHE_SHIFT - inode->i_blkbits);
  1953. while (index <= end) {
  1954. nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
  1955. if (nr_pages == 0)
  1956. break;
  1957. for (i = 0; i < nr_pages; i++) {
  1958. struct page *page = pvec.pages[i];
  1959. if (page->index > end)
  1960. break;
  1961. BUG_ON(!PageLocked(page));
  1962. BUG_ON(PageWriteback(page));
  1963. block_invalidatepage(page, 0);
  1964. ClearPageUptodate(page);
  1965. unlock_page(page);
  1966. }
  1967. index = pvec.pages[nr_pages - 1]->index + 1;
  1968. pagevec_release(&pvec);
  1969. }
  1970. return;
  1971. }
  1972. static void ext4_print_free_blocks(struct inode *inode)
  1973. {
  1974. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  1975. printk(KERN_CRIT "Total free blocks count %lld\n",
  1976. ext4_count_free_blocks(inode->i_sb));
  1977. printk(KERN_CRIT "Free/Dirty block details\n");
  1978. printk(KERN_CRIT "free_blocks=%lld\n",
  1979. (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
  1980. printk(KERN_CRIT "dirty_blocks=%lld\n",
  1981. (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
  1982. printk(KERN_CRIT "Block reservation details\n");
  1983. printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
  1984. EXT4_I(inode)->i_reserved_data_blocks);
  1985. printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
  1986. EXT4_I(inode)->i_reserved_meta_blocks);
  1987. return;
  1988. }
  1989. /*
  1990. * mpage_da_map_blocks - go through given space
  1991. *
  1992. * @mpd - bh describing space
  1993. *
  1994. * The function skips space we know is already mapped to disk blocks.
  1995. *
  1996. */
  1997. static int mpage_da_map_blocks(struct mpage_da_data *mpd)
  1998. {
  1999. int err, blks, get_blocks_flags;
  2000. struct buffer_head new;
  2001. sector_t next = mpd->b_blocknr;
  2002. unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
  2003. loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
  2004. handle_t *handle = NULL;
  2005. /*
  2006. * We consider only non-mapped and non-allocated blocks
  2007. */
  2008. if ((mpd->b_state & (1 << BH_Mapped)) &&
  2009. !(mpd->b_state & (1 << BH_Delay)) &&
  2010. !(mpd->b_state & (1 << BH_Unwritten)))
  2011. return 0;
  2012. /*
2013. * If we didn't accumulate anything to write, simply return
  2014. */
  2015. if (!mpd->b_size)
  2016. return 0;
  2017. handle = ext4_journal_current_handle();
  2018. BUG_ON(!handle);
  2019. /*
  2020. * Call ext4_get_blocks() to allocate any delayed allocation
  2021. * blocks, or to convert an uninitialized extent to be
  2022. * initialized (in the case where we have written into
  2023. * one or more preallocated blocks).
  2024. *
  2025. * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
  2026. * indicate that we are on the delayed allocation path. This
  2027. * affects functions in many different parts of the allocation
  2028. * call path. This flag exists primarily because we don't
  2029. * want to change *many* call functions, so ext4_get_blocks()
  2030. * will set the magic i_delalloc_reserved_flag once the
  2031. * inode's allocation semaphore is taken.
  2032. *
2033. * If the blocks in question were delalloc blocks, set
  2034. * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
  2035. * variables are updated after the blocks have been allocated.
  2036. */
  2037. new.b_state = 0;
  2038. get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
  2039. if (ext4_should_dioread_nolock(mpd->inode))
  2040. get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
  2041. if (mpd->b_state & (1 << BH_Delay))
  2042. get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
  2043. blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
  2044. &new, get_blocks_flags);
  2045. if (blks < 0) {
  2046. err = blks;
  2047. /*
  2048. * If get block returns with error we simply
  2049. * return. Later writepage will redirty the page and
  2050. * writepages will find the dirty page again
  2051. */
  2052. if (err == -EAGAIN)
  2053. return 0;
  2054. if (err == -ENOSPC &&
  2055. ext4_count_free_blocks(mpd->inode->i_sb)) {
  2056. mpd->retval = err;
  2057. return 0;
  2058. }
  2059. /*
  2060. * get block failure will cause us to loop in
  2061. * writepages, because a_ops->writepage won't be able
  2062. * to make progress. The page will be redirtied by
  2063. * writepage and writepages will again try to write
  2064. * the same.
  2065. */
  2066. ext4_msg(mpd->inode->i_sb, KERN_CRIT,
  2067. "delayed block allocation failed for inode %lu at "
  2068. "logical offset %llu with max blocks %zd with "
  2069. "error %d", mpd->inode->i_ino,
  2070. (unsigned long long) next,
  2071. mpd->b_size >> mpd->inode->i_blkbits, err);
  2072. printk(KERN_CRIT "This should not happen!! "
  2073. "Data will be lost\n");
  2074. if (err == -ENOSPC) {
  2075. ext4_print_free_blocks(mpd->inode);
  2076. }
  2077. /* invalidate all the pages */
  2078. ext4_da_block_invalidatepages(mpd, next,
  2079. mpd->b_size >> mpd->inode->i_blkbits);
  2080. return err;
  2081. }
  2082. BUG_ON(blks == 0);
  2083. new.b_size = (blks << mpd->inode->i_blkbits);
  2084. if (buffer_new(&new))
  2085. __unmap_underlying_blocks(mpd->inode, &new);
  2086. /*
2087. * If blocks are marked delayed, we need to
2088. * fill in the actual blocknr and drop the delayed bit
  2089. */
  2090. if ((mpd->b_state & (1 << BH_Delay)) ||
  2091. (mpd->b_state & (1 << BH_Unwritten)))
  2092. mpage_put_bnr_to_bhs(mpd, next, &new);
  2093. if (ext4_should_order_data(mpd->inode)) {
  2094. err = ext4_jbd2_file_inode(handle, mpd->inode);
  2095. if (err)
  2096. return err;
  2097. }
  2098. /*
  2099. * Update on-disk size along with block allocation.
  2100. */
  2101. disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
  2102. if (disksize > i_size_read(mpd->inode))
  2103. disksize = i_size_read(mpd->inode);
  2104. if (disksize > EXT4_I(mpd->inode)->i_disksize) {
  2105. ext4_update_i_disksize(mpd->inode, disksize);
  2106. return ext4_mark_inode_dirty(handle, mpd->inode);
  2107. }
  2108. return 0;
  2109. }
  2110. #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
  2111. (1 << BH_Delay) | (1 << BH_Unwritten))
  2112. /*
  2113. * mpage_add_bh_to_extent - try to add one more block to extent of blocks
  2114. *
  2115. * @mpd->lbh - extent of blocks
  2116. * @logical - logical number of the block in the file
  2117. * @bh - bh of the block (used to access block's state)
  2118. *
2119. * the function is used to collect contiguous blocks in the same state
  2120. */
  2121. static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
  2122. sector_t logical, size_t b_size,
  2123. unsigned long b_state)
  2124. {
  2125. sector_t next;
  2126. int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
  2127. /*
  2128. * XXX Don't go larger than mballoc is willing to allocate
  2129. * This is a stopgap solution. We eventually need to fold
  2130. * mpage_da_submit_io() into this function and then call
  2131. * ext4_get_blocks() multiple times in a loop
  2132. */
  2133. if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
  2134. goto flush_it;
2135. /* check if the reserved journal credits might overflow */
  2136. if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
  2137. if (nrblocks >= EXT4_MAX_TRANS_DATA) {
  2138. /*
  2139. * With non-extent format we are limited by the journal
  2140. * credit available. Total credit needed to insert
  2141. * nrblocks contiguous blocks is dependent on the
  2142. * nrblocks. So limit nrblocks.
  2143. */
  2144. goto flush_it;
  2145. } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
  2146. EXT4_MAX_TRANS_DATA) {
  2147. /*
  2148. * Adding the new buffer_head would make it cross the
  2149. * allowed limit for which we have journal credit
  2150. * reserved. So limit the new bh->b_size
  2151. */
  2152. b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
  2153. mpd->inode->i_blkbits;
  2154. /* we will do mpage_da_submit_io in the next loop */
  2155. }
  2156. }
  2157. /*
  2158. * First block in the extent
  2159. */
  2160. if (mpd->b_size == 0) {
  2161. mpd->b_blocknr = logical;
  2162. mpd->b_size = b_size;
  2163. mpd->b_state = b_state & BH_FLAGS;
  2164. return;
  2165. }
  2166. next = mpd->b_blocknr + nrblocks;
  2167. /*
  2168. * Can we merge the block to our big extent?
  2169. */
  2170. if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
  2171. mpd->b_size += b_size;
  2172. return;
  2173. }
  2174. flush_it:
  2175. /*
  2176. * We couldn't merge the block to our extent, so we
2177. * need to flush the current extent and start a new one
  2178. */
  2179. if (mpage_da_map_blocks(mpd) == 0)
  2180. mpage_da_submit_io(mpd);
  2181. mpd->io_done = 1;
  2182. return;
  2183. }
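/*
 * Worked example (4K blocks, extent-mapped inode): with an accumulated
 * extent of b_blocknr = 100 and b_size = 8192 (nrblocks = 2), a buffer
 * at logical block 102 whose BH_FLAGS state matches merges, growing
 * b_size to 12288; a buffer at logical block 105, or one with a
 * different state, takes the flush_it path instead.
 */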
  2184. static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
  2185. {
  2186. return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
  2187. }
  2188. /*
  2189. * __mpage_da_writepage - finds extent of pages and blocks
  2190. *
  2191. * @page: page to consider
  2192. * @wbc: not used, we just follow rules
  2193. * @data: context
  2194. *
2195. * The function finds extents of pages and scans them for all blocks.
  2196. */
  2197. static int __mpage_da_writepage(struct page *page,
  2198. struct writeback_control *wbc, void *data)
  2199. {
  2200. struct mpage_da_data *mpd = data;
  2201. struct inode *inode = mpd->inode;
  2202. struct buffer_head *bh, *head;
  2203. sector_t logical;
  2204. /*
  2205. * Can we merge this page to current extent?
  2206. */
  2207. if (mpd->next_page != page->index) {
  2208. /*
  2209. * Nope, we can't. So, we map non-allocated blocks
  2210. * and start IO on them using writepage()
  2211. */
  2212. if (mpd->next_page != mpd->first_page) {
  2213. if (mpage_da_map_blocks(mpd) == 0)
  2214. mpage_da_submit_io(mpd);
  2215. /*
  2216. * skip rest of the page in the page_vec
  2217. */
  2218. mpd->io_done = 1;
  2219. redirty_page_for_writepage(wbc, page);
  2220. unlock_page(page);
  2221. return MPAGE_DA_EXTENT_TAIL;
  2222. }
  2223. /*
  2224. * Start next extent of pages ...
  2225. */
  2226. mpd->first_page = page->index;
  2227. /*
  2228. * ... and blocks
  2229. */
  2230. mpd->b_size = 0;
  2231. mpd->b_state = 0;
  2232. mpd->b_blocknr = 0;
  2233. }
  2234. mpd->next_page = page->index + 1;
  2235. logical = (sector_t) page->index <<
  2236. (PAGE_CACHE_SHIFT - inode->i_blkbits);
  2237. if (!page_has_buffers(page)) {
  2238. mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
  2239. (1 << BH_Dirty) | (1 << BH_Uptodate));
  2240. if (mpd->io_done)
  2241. return MPAGE_DA_EXTENT_TAIL;
  2242. } else {
  2243. /*
  2244. * Page with regular buffer heads, just add all dirty ones
  2245. */
  2246. head = page_buffers(page);
  2247. bh = head;
  2248. do {
  2249. BUG_ON(buffer_locked(bh));
  2250. /*
  2251. * We need to try to allocate
  2252. * unmapped blocks in the same page.
  2253. * Otherwise we won't make progress
  2254. * with the page in ext4_writepage
  2255. */
  2256. if (ext4_bh_delay_or_unwritten(NULL, bh)) {
  2257. mpage_add_bh_to_extent(mpd, logical,
  2258. bh->b_size,
  2259. bh->b_state);
  2260. if (mpd->io_done)
  2261. return MPAGE_DA_EXTENT_TAIL;
  2262. } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
  2263. /*
  2264. * mapped dirty buffer. We need to update
  2265. * the b_state because we look at
  2266. * b_state in mpage_da_map_blocks. We don't
  2267. * update b_size because if we find an
  2268. * unmapped buffer_head later we need to
  2269. * use the b_state flag of that buffer_head.
  2270. */
  2271. if (mpd->b_size == 0)
  2272. mpd->b_state = bh->b_state & BH_FLAGS;
  2273. }
  2274. logical++;
  2275. } while ((bh = bh->b_this_page) != head);
  2276. }
  2277. return 0;
  2278. }
  2279. /*
  2280. * This is a special get_blocks_t callback which is used by
2281. * ext4_da_write_begin(). It will either return a mapped block or
  2282. * reserve space for a single block.
  2283. *
  2284. * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
  2285. * We also have b_blocknr = -1 and b_bdev initialized properly
  2286. *
  2287. * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
  2288. * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
  2289. * initialized properly.
  2290. */
  2291. static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
  2292. struct buffer_head *bh_result, int create)
  2293. {
  2294. int ret = 0;
  2295. sector_t invalid_block = ~((sector_t) 0xffff);
  2296. if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
  2297. invalid_block = ~0;
  2298. BUG_ON(create == 0);
  2299. BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
  2300. /*
2301. * First, we need to know whether the block is already allocated;
2302. * preallocated blocks are unmapped but should be treated
2303. * the same as allocated blocks.
  2304. */
  2305. ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
  2306. if ((ret == 0) && !buffer_delay(bh_result)) {
  2307. /* the block isn't (pre)allocated yet, let's reserve space */
  2308. /*
  2309. * XXX: __block_prepare_write() unmaps passed block,
  2310. * is it OK?
  2311. */
  2312. ret = ext4_da_reserve_space(inode, iblock);
  2313. if (ret)
  2314. /* not enough space to reserve */
  2315. return ret;
  2316. map_bh(bh_result, inode->i_sb, invalid_block);
  2317. set_buffer_new(bh_result);
  2318. set_buffer_delay(bh_result);
  2319. } else if (ret > 0) {
  2320. bh_result->b_size = (ret << inode->i_blkbits);
  2321. if (buffer_unwritten(bh_result)) {
  2322. /* A delayed write to unwritten bh should
  2323. * be marked new and mapped. Mapped ensures
  2324. * that we don't do get_block multiple times
  2325. * when we write to the same offset and new
  2326. * ensures that we do proper zero out for
  2327. * partial write.
  2328. */
  2329. set_buffer_new(bh_result);
  2330. set_buffer_mapped(bh_result);
  2331. }
  2332. ret = 0;
  2333. }
  2334. return ret;
  2335. }
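/*
 * State sketch (illustrative): after this callback, a write_begin page
 * can contain three kinds of buffer_heads:
 *   - delayed:   BH_Mapped | BH_New | BH_Delay set, b_blocknr points at
 *                invalid_block, one data block reserved above
 *   - unwritten: real b_blocknr, BH_Mapped | BH_New added here (with
 *                BH_Unwritten already set) so the generic code zeroes
 *                out partial writes exactly once
 *   - mapped:    returned as-is by ext4_get_blocks()
 */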
  2336. /*
2337. * This function is used as a standard get_block_t callback function
  2338. * when there is no desire to allocate any blocks. It is used as a
  2339. * callback function for block_prepare_write(), nobh_writepage(), and
  2340. * block_write_full_page(). These functions should only try to map a
  2341. * single block at a time.
  2342. *
  2343. * Since this function doesn't do block allocations even if the caller
  2344. * requests it by passing in create=1, it is critically important that
2345. * any caller checks to make sure that any buffer heads returned
2346. * by this function are either all already mapped or marked for
2347. * delayed allocation before calling nobh_writepage() or
2348. * block_write_full_page(). Otherwise, b_blocknr could be left
2349. * uninitialized, and the page write functions will be taken by
  2350. * surprise.
  2351. */
  2352. static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
  2353. struct buffer_head *bh_result, int create)
  2354. {
  2355. int ret = 0;
  2356. unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
  2357. BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
  2358. /*
  2359. * we don't want to do block allocation in writepage
  2360. * so call get_block_wrap with create = 0
  2361. */
  2362. ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
  2363. if (ret > 0) {
  2364. bh_result->b_size = (ret << inode->i_blkbits);
  2365. ret = 0;
  2366. }
  2367. return ret;
  2368. }
  2369. static int bget_one(handle_t *handle, struct buffer_head *bh)
  2370. {
  2371. get_bh(bh);
  2372. return 0;
  2373. }
  2374. static int bput_one(handle_t *handle, struct buffer_head *bh)
  2375. {
  2376. put_bh(bh);
  2377. return 0;
  2378. }
static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	page_bufs = page_buffers(page);
	BUG_ON(!page_bufs);
	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
	/* As soon as we unlock the page, it can go away, but we have
	 * references to buffers so we are safe */
	unlock_page(page);

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				do_journal_get_write_access);

	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				write_end_fn);
	if (ret == 0)
		ret = err;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	return ret;
}

static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We also don't
 * need to add the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start a transaction directly because transaction start ranks above
 * page lock so we have to do some magic.
 *
 * This function can get called via...
 *   - ext4_da_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via pdflush (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function. If we have a page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmap-based writes. So if we do, with a blocksize of 1K,
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 * we have in the page the first buffer_head mapped via the page_mkwrite
 * callback, but the other buffer_heads would be unmapped but dirty (dirtied
 * via do_wp_page). So writepage should write the first block. If we modify
 * the mmap area beyond 1024 we will again get a page fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delayed
 * or unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has the dirty flag cleared so we don't get recursive
 * page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;

	trace_ext4_writepage(inode, page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		page_bufs = page_buffers(page);
		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
				      ext4_bh_delay_or_unwritten)) {
			/*
			 * We don't want to do block allocation, so redirty
			 * the page and return.  We may reach here when we do
			 * a journal commit via
			 * journal_submit_inode_data_buffers; if a buffer
			 * doesn't have a mapped block we just skip it.  We
			 * can also reach here via shrink_page_list.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	} else {
		/*
		 * The test for page_has_buffers() is subtle:
		 * We know the page is dirty but it lost buffers. That means
		 * that at some moment in time after write_begin()/write_end()
		 * has been called all buffers have been clean and thus they
		 * must have been written at least once. So they are all
		 * mapped and we can happily proceed with mapping them
		 * and writing the page.
		 *
		 * Try to initialize the buffer_heads and check whether
		 * all are mapped and non-delayed. We don't want to
		 * do block allocation here.
		 */
		ret = block_prepare_write(page, 0, len,
					  noalloc_get_block_write);
		if (!ret) {
			page_bufs = page_buffers(page);
			/* check whether all are mapped and non-delayed */
			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
					      ext4_bh_delay_or_unwritten)) {
				redirty_page_for_writepage(wbc, page);
				unlock_page(page);
				return 0;
			}
		} else {
			/*
			 * We can't do block allocation here,
			 * so just redirty the page, unlock it,
			 * and return.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
		/* now mark the buffer_heads as dirty and uptodate */
		block_commit_write(page, 0, len);
	}

	if (PageChecked(page) && ext4_should_journal_data(inode)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		return __ext4_journalled_writepage(page, len);
	}

	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
		ret = nobh_writepage(page, noalloc_get_block_write, wbc);
	else if (page_bufs && buffer_uninit(page_bufs)) {
		ext4_set_bh_endio(page_bufs, inode);
		ret = block_write_full_page_endio(page, noalloc_get_block_write,
						  wbc, ext4_end_io_buffer_write);
	} else
		ret = block_write_full_page(page, noalloc_get_block_write,
					    wbc);

	return ret;
}
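
/*
 * Added commentary (not from the original source): by the time the
 * writeout above is submitted there are three mutually exclusive tail
 * paths -- nobh_writepage() for nobh+writeback mounts, the endio variant
 * when the first buffer carries the uninit flag (so the unwritten->written
 * conversion can be deferred to ext4_end_io_buffer_write()), and plain
 * block_write_full_page() otherwise.  The data=journal case returned
 * earlier via __ext4_journalled_writepage().
 */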
/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction.
 * ext4_da_writepages() will loop, calling this before
 * the block allocation.
 */
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;

	/*
	 * With non-extent format the journal credit needed to
	 * insert nrblocks contiguous blocks is dependent on the
	 * number of contiguous blocks. So we will limit the
	 * number of contiguous blocks to a sane value.
	 */
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
	    (max_blocks > EXT4_MAX_TRANS_DATA))
		max_blocks = EXT4_MAX_TRANS_DATA;

	return ext4_chunk_trans_blocks(inode, max_blocks);
}
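
/*
 * Illustrative note (added commentary, not from the original source): on
 * an indirect-mapped inode with, say, thousands of reserved delalloc
 * blocks, max_blocks is first clamped to EXT4_MAX_TRANS_DATA before
 * being handed to ext4_chunk_trans_blocks(), so the credit estimate
 * stays bounded no matter how much delalloc has accumulated.
 * Extent-mapped inodes skip the clamp because a single extent insert
 * costs roughly the same regardless of its length.
 */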
/*
 * write_cache_pages_da - walk the list of dirty pages of the given
 * address space and call the callback function (which usually writes
 * the pages).
 *
 * This is a forked version of write_cache_pages().  Differences:
 *	Range cyclic is ignored.
 *	no_nrwrite_index_update is always presumed true
 */
static int write_cache_pages_da(struct address_space *mapping,
				struct writeback_control *wbc,
				struct mpage_da_data *mpd)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	long nr_to_write = wbc->nr_to_write;

	pagevec_init(&pvec, 0);
	index = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = wbc->range_end >> PAGE_CACHE_SHIFT;

	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			      PAGECACHE_TAG_DIRTY,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				done = 1;
				break;
			}

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __mpage_da_writepage(page, wbc, mpd);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done = 1;
					break;
				}
			}

			if (nr_to_write > 0) {
				nr_to_write--;
				if (nr_to_write == 0 &&
				    wbc->sync_mode == WB_SYNC_NONE) {
					/*
					 * We stop writing back only if we are
					 * not doing integrity sync. In case of
					 * integrity sync we have to keep going
					 * because someone may be concurrently
					 * dirtying pages, and we might have
					 * synced a lot of newly appeared dirty
					 * pages, but have not synced all of the
					 * old dirty pages.
					 */
					done = 1;
					break;
				}
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
static int ext4_da_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	pgoff_t index;
	int range_whole = 0;
	handle_t *handle = NULL;
	struct mpage_da_data mpd;
	struct inode *inode = mapping->host;
	int pages_written = 0;
	long pages_skipped;
	unsigned int max_pages;
	int range_cyclic, cycled = 1, io_done = 0;
	int needed_blocks, ret = 0;
	long desired_nr_to_write, nr_to_writebump = 0;
	loff_t range_start = wbc->range_start;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

	trace_ext4_da_writepages(inode, wbc);

	/*
	 * No pages to write? This is mainly a kludge to avoid starting
	 * a transaction for special inodes like the journal inode on last
	 * iput() because that could violate lock ordering on umount.
	 */
	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	/*
	 * If the filesystem has aborted, it is read-only, so return
	 * right away instead of dumping stack traces later on that
	 * will obscure the real source of the problem.  We test
	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
	 * the latter could be true if the filesystem is mounted
	 * read-only, and in that case, ext4_da_writepages should
	 * *never* be called, so if that ever happens, we would want
	 * the stack trace.
	 */
	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
		return -EROFS;

	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
		range_whole = 1;

	range_cyclic = wbc->range_cyclic;
	if (wbc->range_cyclic) {
		index = mapping->writeback_index;
		if (index)
			cycled = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end = LLONG_MAX;
		wbc->range_cyclic = 0;
	} else
		index = wbc->range_start >> PAGE_CACHE_SHIFT;

	/*
	 * This works around two forms of stupidity.  The first is in
	 * the writeback code, which caps the maximum number of pages
	 * written to be 1024 pages.  This is wrong on multiple
	 * levels; different architectures have a different page size,
	 * which changes the maximum amount of data which gets
	 * written.  Secondly, 4 megabytes is way too small.  XFS
	 * forces this value to be 16 megabytes by multiplying the
	 * nr_to_write parameter by four, and then relies on its
	 * allocator to allocate larger extents to make them
	 * contiguous.  Unfortunately this brings us to the second
	 * stupidity, which is that ext4's mballoc code only allocates
	 * at most 2048 blocks.  So we force contiguous writes up to
	 * the number of dirty blocks in the inode, or
	 * sbi->max_writeback_mb_bump, whichever is smaller.
	 */
	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
	if (!range_cyclic && range_whole)
		desired_nr_to_write = wbc->nr_to_write * 8;
	else
		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
							   max_pages);
	if (desired_nr_to_write > max_pages)
		desired_nr_to_write = max_pages;

	if (wbc->nr_to_write < desired_nr_to_write) {
		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
		wbc->nr_to_write = desired_nr_to_write;
	}
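
	/*
	 * Illustrative arithmetic (added commentary, not from the original
	 * source): with 4K pages (PAGE_CACHE_SHIFT == 12) and an assumed
	 * s_max_writeback_mb_bump of 128, max_pages above works out to
	 * 128 << (20 - 12) = 32768 pages, i.e. 128 MB of writeback per
	 * invocation before desired_nr_to_write is clamped.
	 */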
	mpd.wbc = wbc;
	mpd.inode = mapping->host;

	pages_skipped = wbc->pages_skipped;

retry:
	while (!ret && wbc->nr_to_write > 0) {

		/*
		 * We insert one extent at a time, so we need the
		 * credits for a single extent allocation.
		 * Journalled mode is currently not supported
		 * by delalloc.
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction */
		handle = ext4_journal_start(inode, needed_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			goto out_writepages;
		}

		/*
		 * Now call __mpage_da_writepage to find the next
		 * contiguous region of logical blocks that need
		 * blocks to be allocated by ext4.  We don't actually
		 * submit the blocks for I/O here, even though
		 * write_cache_pages thinks it will, and will set the
		 * pages as clean for write before calling
		 * __mpage_da_writepage().
		 */
		mpd.b_size = 0;
		mpd.b_state = 0;
		mpd.b_blocknr = 0;
		mpd.first_page = 0;
		mpd.next_page = 0;
		mpd.io_done = 0;
		mpd.pages_written = 0;
		mpd.retval = 0;
		ret = write_cache_pages_da(mapping, wbc, &mpd);
		/*
		 * If we have a contiguous extent of pages and we
		 * haven't done the I/O yet, map the blocks and submit
		 * them for I/O.
		 */
		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
			if (mpage_da_map_blocks(&mpd) == 0)
				mpage_da_submit_io(&mpd);
			mpd.io_done = 1;
			ret = MPAGE_DA_EXTENT_TAIL;
		}
		trace_ext4_da_write_pages(inode, &mpd);
		wbc->nr_to_write -= mpd.pages_written;

		ext4_journal_stop(handle);

		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
			/* commit the transaction which would
			 * free the blocks released in the transaction
			 * and try again */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			wbc->pages_skipped = pages_skipped;
			ret = 0;
		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
			/*
			 * got one extent; now try with the
			 * rest of the pages
			 */
			pages_written += mpd.pages_written;
			wbc->pages_skipped = pages_skipped;
			ret = 0;
			io_done = 1;
		} else if (wbc->nr_to_write)
			/*
			 * There is no more writeout needed,
			 * or we requested a non-blocking writeout
			 * and we found the device congested.
			 */
			break;
	}
	if (!io_done && !cycled) {
		cycled = 1;
		index = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end = mapping->writeback_index - 1;
		goto retry;
	}
	if (pages_skipped != wbc->pages_skipped)
		ext4_msg(inode->i_sb, KERN_CRIT,
			 "This should not happen leaving %s "
			 "with nr_to_write = %ld ret = %d",
			 __func__, wbc->nr_to_write, ret);

	/* Update index */
	index += pages_written;
	wbc->range_cyclic = range_cyclic;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * Set the writeback_index so that range_cyclic
		 * mode will write it back later.
		 */
		mapping->writeback_index = index;

out_writepages:
	wbc->nr_to_write -= nr_to_writebump;
	wbc->range_start = range_start;
	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
	return ret;
}
#define FALL_BACK_TO_NONDELALLOC 1
static int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_blocks, dirty_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * Switch to non-delalloc mode if we are running low
	 * on free blocks.  The free block accounting via percpu
	 * counters can get slightly wrong with percpu_counter_batch getting
	 * accumulated on each CPU without updating the global counters.
	 * Delalloc needs an accurate free block accounting, so switch
	 * to non-delalloc when we are near the error range.
	 */
	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
	if (2 * free_blocks < 3 * dirty_blocks ||
	    free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
		/*
		 * free block count is less than 150% of dirty blocks
		 * or free blocks are less than the watermark
		 */
		return 1;
	}
	/*
	 * Even if we don't switch but are nearing capacity,
	 * start pushing delalloc when 1/2 of free blocks are dirty.
	 */
	if (free_blocks < 2 * dirty_blocks)
		writeback_inodes_sb_if_idle(sb);

	return 0;
}
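
/*
 * Illustrative numbers (added commentary, not from the original source):
 * with free_blocks = 1000 and dirty_blocks = 700, the first test fires
 * because 2 * 1000 = 2000 < 3 * 700 = 2100 (free is below 150% of dirty),
 * so the caller falls back to non-delalloc.  With free_blocks = 1300 and
 * dirty_blocks = 700 (and assuming free stays above the watermark test),
 * no switch happens, but since 1300 < 2 * 700 the second check still
 * kicks writeback to start draining delalloc.
 */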
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	struct inode *inode = mapping->host;
	handle_t *handle;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_nonda_switch(inode->i_sb)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);
retry:
	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation.  But we still need
	 * to journal the i_disksize update if we write to the end of
	 * a file which has an already mapped buffer.
	 */
	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_da_get_block_prep);
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  We don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + len > inode->i_size)
			ext4_truncate_failed_write(inode);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
/*
 * Check if we should update i_disksize
 * when writing to the end of the file without requiring block allocation
 */
static int ext4_da_should_update_i_disksize(struct page *page,
					    unsigned long offset)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	unsigned int idx;
	int i;

	bh = page_buffers(page);
	idx = offset >> inode->i_blkbits;

	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
		return 0;
	return 1;
}
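
/*
 * Illustrative example (added commentary, not from the original source):
 * with a 1K block size (i_blkbits == 10) and offset == 3000, idx works
 * out to 3000 >> 10 = 2, so the walk above lands on the third
 * buffer_head in the page; only if that buffer is mapped and neither
 * delayed nor unwritten may i_disksize be updated without allocation.
 */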
static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;
	int write_mode = (int)(unsigned long)fsdata;

	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
		if (ext4_should_order_data(inode)) {
			return ext4_ordered_write_end(file, mapping, pos,
					len, copied, page, fsdata);
		} else if (ext4_should_writeback_data(inode)) {
			return ext4_writeback_write_end(file, mapping, pos,
					len, copied, page, fsdata);
		} else {
			BUG();
		}
	}

	trace_ext4_da_write_end(inode, pos, len, copied);
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
	 */
	new_i_size = pos + copied;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_da_should_update_i_disksize(page, end)) {
			down_write(&EXT4_I(inode)->i_data_sem);
			if (new_i_size > EXT4_I(inode)->i_disksize) {
				/*
				 * Updating i_disksize when extending a file
				 * without needing block allocation
				 */
				if (ext4_should_order_data(inode))
					ret = ext4_jbd2_file_inode(handle,
								   inode);

				EXT4_I(inode)->i_disksize = new_i_size;
			}
			up_write(&EXT4_I(inode)->i_data_sem);
			/* We need to mark the inode dirty even if
			 * new_i_size is less than inode->i_size
			 * but greater than i_disksize. (hint: delalloc)
			 */
			ext4_mark_inode_dirty(handle, inode);
		}
	}
	ret2 = generic_write_end(file, mapping, pos, len, copied,
				 page, fsdata);
	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}
static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
	/*
	 * Drop reserved blocks
	 */
	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	ext4_da_page_release_reservation(page, offset);

out:
	ext4_invalidatepage(page, offset);

	return;
}
/*
 * Force all delayed allocation blocks to be allocated for a given inode.
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
	trace_ext4_alloc_da_blocks(inode);

	if (!EXT4_I(inode)->i_reserved_data_blocks &&
	    !EXT4_I(inode)->i_reserved_meta_blocks)
		return 0;

	/*
	 * We do something simple for now.  The filemap_flush() will
	 * also start triggering a write of the data blocks, which is
	 * not strictly speaking necessary (and for users of
	 * laptop_mode, not even desirable).  However, to do otherwise
	 * would require replicating code paths in:
	 *
	 * ext4_da_writepages() ->
	 *    write_cache_pages() ---> (via passed in callback function)
	 *        __mpage_da_writepage() -->
	 *           mpage_add_bh_to_extent()
	 *           mpage_da_map_blocks()
	 *
	 * The problem is that write_cache_pages(), located in
	 * mm/page-writeback.c, marks pages clean in preparation for
	 * doing I/O, which is not desirable if we're not planning on
	 * doing I/O at all.
	 *
	 * We could call write_cache_pages(), and then redirty all of
	 * the pages by calling redirty_page_for_writeback() but that
	 * would be ugly in the extreme.  So instead we would need to
	 * replicate parts of the code in the above functions,
	 * simplifying them because we wouldn't actually intend to
	 * write out the pages, but rather only collect contiguous
	 * logical block extents, call the multi-block allocator, and
	 * then update the buffer heads with the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}
/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for the file
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_JOURNAL(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext4_get_block);
}
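
/*
 * Usage note (added commentary, not from the original source): this is
 * the ->bmap address_space operation, typically reached through the
 * FIBMAP ioctl or the swap code; generic_block_bmap() resolves one
 * logical block to a physical block number via ext4_get_block() with
 * create == 0, returning 0 for holes.
 */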
static int ext4_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext4_get_block);
}

static int
ext4_readpages(struct file *file, struct address_space *mapping,
	       struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}

static void ext4_free_io_end(ext4_io_end_t *io)
{
	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	iput(io->inode);
	kfree(io);
}
static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;

	if (!page_has_buffers(page))
		return;
	head = bh = page_buffers(page);
	do {
		if (offset <= curr_off && test_clear_buffer_uninit(bh)
		    && bh->b_private) {
			ext4_free_io_end(bh->b_private);
			bh->b_private = NULL;
			bh->b_end_io = NULL;
		}
		curr_off = curr_off + bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	/*
	 * free any io_end structures allocated for buffers to be discarded
	 */
	if (ext4_should_dioread_nolock(page->mapping->host))
		ext4_invalidatepage_free_endio(page, offset);
	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	if (journal)
		jbd2_journal_invalidatepage(journal, page, offset);
	else
		block_invalidatepage(page, offset);
}

static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page, wait);
	else
		return try_to_free_buffers(page);
}
/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes during the write, then stale disk data _may_ be exposed inside the
 * file.  But the current VFS code falls back to the buffered path in that
 * case, so we are safe.
 */
static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
				  const struct iovec *iov, loff_t offset,
				  unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

retry:
	if (rw == READ && ext4_should_dioread_nolock(inode))
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
						    inode->i_sb->s_bdev, iov,
						    offset, nr_segs,
						    ext4_get_block, NULL);
	else
		ret = blockdev_direct_IO(rw, iocb, inode,
					 inode->i_sb->s_bdev, iov,
					 offset, nr_segs,
					 ext4_get_block, NULL);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	int dio_credits;
	int started = 0;

	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	/*
	 * ext4_get_block in preparation for a DIO write or buffer write.
	 * We allocate an uninitialized extent if blocks haven't been
	 * allocated.  The extent will be converted to initialized after
	 * IO completes.
	 */
	create = EXT4_GET_BLOCKS_IO_CREATE_EXT;

	if (!handle) {
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
			      create);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
out:
	return ret;
}
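
/*
 * Illustrative note (added commentary, not from the original source):
 * unlike noalloc_get_block_write() above, the direct I/O code passes the
 * size of the whole remaining request in bh_result->b_size, so with an
 * assumed 4K block size a 64K DIO segment arrives as max_blocks = 16
 * (clamped to DIO_MAX_BLOCKS when this function starts its own handle);
 * however many blocks ext4_get_blocks() maps in one go is folded back
 * into b_size as ret << i_blkbits.
 */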
static void dump_completed_IO(struct inode *inode)
{
#ifdef EXT4_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;
	unsigned long flags;

	if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
		ext4_debug("inode %lu completed_io list is empty\n",
			   inode->i_ino);
		return;
	}

	ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io, inode->i_ino, io0, io1);
	}
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}
/*
 * check a range of space and convert unwritten extents to written.
 */
static int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	if (list_empty(&io->list))
		return ret;

	if (io->flag != EXT4_IO_UNWRITTEN)
		return ret;

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		printk(KERN_EMERG "%s: failed to convert unwritten "
			"extents to written extents, error is %d"
			" io is still on inode %lu aio dio list\n",
			__func__, ret, inode->i_ino);
		return ret;
	}

	/* clear the DIO AIO unwritten flag */
	io->flag = 0;
	return ret;
}
/*
 * work on completed aio dio IO, to convert unwritten extents to written
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
	struct inode *inode = io->inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;
	int ret;

	mutex_lock(&inode->i_mutex);
	ret = ext4_end_io_nolock(io);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		return;
	}

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (!list_empty(&io->list))
		list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	mutex_unlock(&inode->i_mutex);
	ext4_free_io_end(io);
}
/*
 * This function is called from ext4_sync_file().
 *
 * When IO is completed, the work to convert unwritten extents to
 * written is queued on the workqueue but may not get immediately
 * scheduled.  When fsync is called, we need to ensure the
 * conversion is complete before fsync returns.
 * The inode keeps track of a list of pending/completed IO that
 * might need the conversion.  This function walks through
 * the list and converts the related unwritten extents for completed IO
 * to written.
 * The function returns 0 on success, or the first conversion error
 * encountered while draining the list.
 */
int flush_completed_IO(struct inode *inode)
{
	ext4_io_end_t *io;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;

	if (list_empty(&ei->i_completed_io_list))
		return ret;

	dump_completed_IO(inode);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	while (!list_empty(&ei->i_completed_io_list)) {
		io = list_entry(ei->i_completed_io_list.next,
				ext4_io_end_t, list);
		/*
		 * Calling ext4_end_io_nolock() to convert completed
		 * IO to written.
		 *
		 * When ext4_sync_file() is called, run_queue() may already
		 * be about to flush the work corresponding to this io
		 * structure.  It will be upset if it finds that the io
		 * structure related to the work to be scheduled has been
		 * freed.
		 *
		 * Thus we need to keep the io structure still valid here
		 * after the conversion has finished.  The io structure has
		 * a flag to avoid double conversion from both fsync and the
		 * background work queue.
		 */
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		ret = ext4_end_io_nolock(io);
		spin_lock_irqsave(&ei->i_completed_io_lock, flags);
		if (ret < 0)
			ret2 = ret;
		else
			list_del_init(&io->list);
	}
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	return (ret2 < 0) ? ret2 : 0;
}
static ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = NULL;

	io = kmalloc(sizeof(*io), flags);
	if (io) {
		igrab(inode);
		io->inode = inode;
		io->flag = 0;
		io->offset = 0;
		io->size = 0;
		io->page = NULL;
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}

	return io;
}
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
			    ssize_t size, void *private)
{
	ext4_io_end_t *io_end = iocb->private;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct ext4_inode_info *ei;

	/* if not async direct IO or dio with 0 bytes write, just return */
	if (!io_end || !size)
		return;

	ext_debug("ext4_end_io_dio(): io_end 0x%p "
		  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
		  iocb->private, io_end->inode->i_ino, iocb, offset,
		  size);

	/* if not aio dio with unwritten extents, just free io and return */
	if (io_end->flag != EXT4_IO_UNWRITTEN) {
		ext4_free_io_end(io_end);
		iocb->private = NULL;
		return;
	}

	io_end->offset = offset;
	io_end->size = size;
	io_end->flag = EXT4_IO_UNWRITTEN;
	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);

	/* Add the io_end to the per-inode completed aio dio list */
	ei = EXT4_I(io_end->inode);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &ei->i_completed_io_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	iocb->private = NULL;
}
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
{
	ext4_io_end_t *io_end = bh->b_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;

	if (!test_clear_buffer_uninit(bh) || !io_end)
		goto out;

	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
		printk("sb umounted, discard end_io request for inode %lu\n",
			io_end->inode->i_ino);
		ext4_free_io_end(io_end);
		goto out;
	}

	io_end->flag = EXT4_IO_UNWRITTEN;
	inode = io_end->inode;

	/* Add the io_end to the per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
out:
	bh->b_private = NULL;
	bh->b_end_io = NULL;
	clear_buffer_uninit(bh);
	end_buffer_async_write(bh, uptodate);
}
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
	size_t size = bh->b_size;

retry:
	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
	if (!io_end) {
		if (printk_ratelimit())
			printk(KERN_WARNING "%s: allocation fail\n", __func__);
		schedule();
		goto retry;
	}
	io_end->offset = offset;
	io_end->size = size;
	/*
	 * We need to hold a reference to the page to make sure it
	 * doesn't get evicted before ext4_end_io_work() has a chance
	 * to convert the extent from unwritten to written.
	 */
	io_end->page = page;
	get_page(io_end->page);

	bh->b_private = io_end;
	bh->b_end_io = ext4_end_io_buffer_write;
	return 0;
}
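
/*
 * Design note (added commentary, not from the original source): the
 * io_end allocation above uses GFP_ATOMIC, presumably so the allocator
 * cannot re-enter reclaim and the filesystem from the writeback path; on
 * failure the code deliberately busy-retries around schedule() rather
 * than failing the write, since there is no clean way to report an error
 * from this point in the writeout sequence.
 */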
/*
 * For ext4 extent files, ext4 will do direct-io writes to holes,
 * preallocated extents, and writes that extend the file; there is no
 * need to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when DIO is
 * completed.  For async direct IO, since the IO may still be pending
 * when we return, we set up an end_io callback function, which will do
 * the conversion when the async direct IO is completed.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 */
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
				  const struct iovec *iov, loff_t offset,
				  unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;
	size_t count = iov_length(iov, nr_segs);

	loff_t final_size = offset + count;
	if (rw == WRITE && final_size <= inode->i_size) {
		/*
		 * We could direct write to holes and fallocate.
		 *
		 * Allocated blocks to fill the hole are marked as
		 * uninitialized to prevent parallel buffered reads from
		 * exposing the stale data before DIO completes the data IO.
		 *
		 * As to previously fallocated extents, ext4 get_block
		 * will just simply mark the buffer mapped but still
		 * keep the extents uninitialized.
		 *
		 * For the non-AIO case, we will convert those unwritten
		 * extents to written after returning back from
		 * blockdev_direct_IO.
		 *
		 * For async DIO, the conversion needs to be deferred until
		 * the IO is completed.  The ext4 end_io callback function
		 * will be called to take care of the conversion work.
		 * Here for the async case, we allocate an io_end structure
		 * to hook to the iocb.
		 */
		iocb->private = NULL;
		EXT4_I(inode)->cur_aio_dio = NULL;
		if (!is_sync_kiocb(iocb)) {
			iocb->private = ext4_init_io_end(inode, GFP_NOFS);
			if (!iocb->private)
				return -ENOMEM;
			/*
			 * we save the io structure for the current async
			 * direct IO, so that later ext4_get_blocks()
			 * could flag the io structure if there
			 * is an unwritten extent that needs to be converted
			 * when the IO is completed.
			 */
			EXT4_I(inode)->cur_aio_dio = iocb->private;
		}

		ret = blockdev_direct_IO(rw, iocb, inode,
					 inode->i_sb->s_bdev, iov,
					 offset, nr_segs,
					 ext4_get_block_write,
					 ext4_end_io_dio);
		if (iocb->private)
			EXT4_I(inode)->cur_aio_dio = NULL;
		/*
		 * The io_end structure takes a reference to the inode; that
		 * structure needs to be destroyed and the reference to the
		 * inode needs to be dropped when the IO is complete, even
		 * with a 0-byte write, or a failed one.
		 *
		 * In the successful AIO DIO case, the io_end structure will
		 * be destroyed and the reference to the inode will be
		 * dropped after the end_io callback function is called.
		 *
		 * In the case of a 0-byte write, or an error, since the
		 * VFS direct IO won't invoke the end_io callback function,
		 * we need to free the end_io structure here.
		 */
		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
			ext4_free_io_end(iocb->private);
			iocb->private = NULL;
		} else if (ret > 0 && ext4_test_inode_state(inode,
						EXT4_STATE_DIO_UNWRITTEN)) {
			int err;
			/*
			 * for the non-AIO case, since the IO is already
			 * completed, we can do the conversion right here
			 */
			err = ext4_convert_unwritten_extents(inode,
							     offset, ret);
			if (err < 0)
				ret = err;
			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		}
		return ret;
	}

	/* for writes past the end of the file, we fall back to the old way */
	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);

	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}
/*
 * Pages can be marked dirty completely asynchronously from ext4's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}
static const struct address_space_operations ext4_ordered_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_ordered_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_writeback_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_writeback_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
	.set_page_dirty		= ext4_journalled_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.writepages		= ext4_da_writepages,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_da_write_begin,
	.write_end		= ext4_da_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_da_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
void ext4_set_aops(struct inode *inode)
{
	if (ext4_should_order_data(inode) &&
	    test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else if (ext4_should_order_data(inode))
		inode->i_mapping->a_ops = &ext4_ordered_aops;
	else if (ext4_should_writeback_data(inode) &&
		 test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else if (ext4_should_writeback_data(inode))
		inode->i_mapping->a_ops = &ext4_writeback_aops;
	else
		inode->i_mapping->a_ops = &ext4_journalled_aops;
}
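
/*
 * Added commentary (not from the original source): the dispatch above
 * reduces to a small table --
 *	ordered   + delalloc -> ext4_da_aops
 *	ordered              -> ext4_ordered_aops
 *	writeback + delalloc -> ext4_da_aops
 *	writeback            -> ext4_writeback_aops
 *	data=journal         -> ext4_journalled_aops
 * A data=journal inode gets ext4_journalled_aops even on a delalloc
 * mount, which matches the BUG_ON(ext4_should_journal_data(inode)) in
 * ext4_da_writepages().
 */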
/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 */
int ext4_block_truncate_page(handle_t *handle,
			     struct address_space *mapping, loff_t from)
{
	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, length, pos;
	ext4_lblk_t iblock;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	struct page *page;
	int err = 0;

	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
				   mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (!page)
		return -EINVAL;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
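
	/*
	 * Illustrative arithmetic (added commentary, not from the original
	 * source): with 4K pages and a 1K block size, from = 5000 gives
	 * index = 1, offset = 5000 & 4095 = 904, length = 1024 - 904 = 120
	 * bytes to zero, and iblock = 1 << (12 - 10) = 4, the first logical
	 * block of the page.
	 */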
	/*
	 * For the "nobh" option, we can only work if we don't need to
	 * read-in the page - otherwise we create buffers to do the IO.
	 */
	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
	    ext4_should_writeback_data(inode) && PageUptodate(page)) {
		zero_user(page, offset, length);
		set_page_dirty(page);
		goto unlock;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext4_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user(page, offset, length);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext4_should_journal_data(inode)) {
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	} else {
		if (ext4_should_order_data(inode))
			err = ext4_jbd2_file_inode(handle, inode);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
/*
 * Probably it should be a library function... search for the first non-zero
 * word or memcmp with zero_page, whatever is better for a particular
 * architecture.  Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of the branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive.  A block is
 *	partially truncated if some data below the new i_size is referred
 *	to from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path.  Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but the top of the branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of the branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p.  The return value is the pointer to the last filled
 *	element of @chain.
 *
 *	The work left to the caller to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).
 */
  3907. static Indirect *ext4_find_shared(struct inode *inode, int depth,
  3908. ext4_lblk_t offsets[4], Indirect chain[4],
  3909. __le32 *top)
  3910. {
  3911. Indirect *partial, *p;
  3912. int k, err;
  3913. *top = 0;
  3914. /* Make k index the deepest non-null offset + 1 */
  3915. for (k = depth; k > 1 && !offsets[k-1]; k--)
  3916. ;
  3917. partial = ext4_get_branch(inode, k, offsets, chain, &err);
  3918. /* Writer: pointers */
  3919. if (!partial)
  3920. partial = chain + k-1;
  3921. /*
  3922. * If the branch acquired continuation since we've looked at it -
  3923. * fine, it should all survive and (new) top doesn't belong to us.
  3924. */
  3925. if (!partial->key && *partial->p)
  3926. /* Writer: end */
  3927. goto no_top;
  3928. for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
  3929. ;
  3930. /*
  3931. * OK, we've found the last block that must survive. The rest of our
  3932. * branch should be detached before unlocking. However, if that rest
  3933. * of branch is all ours and does not grow immediately from the inode
  3934. * it's easier to cheat and just decrement partial->p.
  3935. */
  3936. if (p == chain + k - 1 && p > chain) {
  3937. p->p--;
  3938. } else {
  3939. *top = *p->p;
  3940. /* Nope, don't do this in ext4. Must leave the tree intact */
  3941. #if 0
  3942. *p->p = 0;
  3943. #endif
  3944. }
  3945. /* Writer: end */
  3946. while (partial > p) {
  3947. brelse(partial->bh);
  3948. partial--;
  3949. }
  3950. no_top:
  3951. return partial;
  3952. }
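
/*
 * Worked example (added for illustration, assuming 4KB blocks, i.e. 1024
 * pointers per indirect block): if the first freed block lives under the
 * double-indirect tree, @chain[] records the buffer_head and surviving
 * pointer position at each level of the path down to it; the caller then
 * frees everything to the right of @chain[i].p at every level, plus the
 * detached subtree rooted at *@top, exactly as steps a)-c) above describe.
 */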

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		ext4_error(inode->i_sb, "inode #%lu: "
			   "attempt to clear blocks %llu len %lu, invalid",
			   inode->i_ino, (unsigned long long) block_to_free,
			   count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			ext4_handle_dirty_metadata(handle, inode, bh);
		}
		ext4_mark_inode_dirty(handle, inode);
		ext4_truncate_restart_trans(handle, inode,
					    blocks_for_truncate(inode));
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext4_journal_get_write_access(handle, bh);
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
	return 0;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* Starting block # of a run */
	unsigned long count = 0;		/* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* Pointer into inode/ind
						   corresponding to
						   block_to_free */
	ext4_fsblk_t nr;			/* Current block # */
	__le32 *p;				/* Pointer into inode/ind
						   for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				if (ext4_clear_blocks(handle, inode, this_bh,
						      block_to_free, count,
						      block_to_free_p, p))
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (count > 0)
		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			ext4_error(inode->i_sb,
				   "circular indirect block detected, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long) this_bh->b_blocknr);
	}
}
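
/*
 * Illustration (added, not in the original): given a pointer array mapping
 * to physical blocks { 100, 101, 102, 200, 0, 300 }, the loop above issues
 * ext4_clear_blocks() three times - for the run 100..102 (count 3), for 200
 * (count 1), and, via the post-loop flush of the final run, for 300
 * (count 1) - while the hole (0) is simply skipped.
 */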

/**
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				ext4_error(inode->i_sb,
					   "indirect mapped block in inode "
					   "#%lu invalid (level %d, blk #%lu)",
					   inode->i_ino, depth,
					   (unsigned long) nr);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext4_error(inode->i_sb,
					   "Read failure, inode=%lu, block=%llu",
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block. Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate. But it's no longer
			 * needed and we now drop it from the transaction via
			 * jbd2_journal_revoke().
			 *
			 * That's easy if it's exclusively part of this
			 * transaction. But if it's part of the committing
			 * transaction then jbd2_journal_forget() will simply
			 * brelse() it. That means that if the underlying
			 * block is reallocated in ext4_get_block(),
			 * unmap_underlying_metadata() will find this block
			 * and will try to get rid of it. damn, damn.
			 *
			 * If this block has already been committed to the
			 * journal, a revoke record will be written. And
			 * revoke records must be emitted *before* clearing
			 * this block's bit in the bitmaps.
			 */
			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been
			 * released. Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it. So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    blocks_for_truncate(inode));
			}

			ext4_free_blocks(handle, inode, 0, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					      "call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

int ext4_can_truncate(struct inode *inode)
{
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return 0;
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}
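
/*
 * Usage sketch (added, not in the original): ext4_truncate() below bails out
 * early on inodes this predicate rejects - append-only or immutable inodes,
 * fast symlinks whose "data" lives inside the inode, and special files:
 */
#if 0
	if (!ext4_can_truncate(inode))
		return;
#endif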

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk. We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable. It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go. So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem. But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
void ext4_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	ext4_lblk_t last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	if (!ext4_can_truncate(inode))
		return;

	EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		ext4_ext_truncate(inode);
		return;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (inode->i_size & (blocksize - 1))
		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
			goto out_stop;

	n = ext4_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK. This truncate is going to happen. We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers. It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&ei->i_data_sem);

	ext4_discard_preallocations(inode);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop. No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}

	up_write(&ei->i_data_sem);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
}
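
/*
 * Note (added for clarity): the switch at do_indirects: above relies on
 * case fall-through. Entering at "default" (truncation begins among the
 * direct blocks) frees the whole indirect, double-indirect and
 * triple-indirect subtrees in turn; entering at "case EXT4_IND_BLOCK"
 * (truncation began inside the single-indirect tree, which the shared-branch
 * code already handled) frees only the double- and triple-indirect subtrees;
 * "case EXT4_TIND_BLOCK" has nothing left to do.
 */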

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success. If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	ext4_fsblk_t block;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (!ext4_valid_inum(sb, inode->i_ino))
		return -EIO;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (!bh) {
		ext4_error(sb, "unable to read inode block - "
			   "inode=%lu, block=%llu", inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block. In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
			if (table > b)
				b = table;
			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext4_error(sb, "unable to read inode block - inode=%lu,"
				   " block=%llu", inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}
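
/*
 * Worked example (added for illustration, with assumed geometry): with 4KB
 * blocks and 256-byte on-disk inodes, inodes_per_block is 16. The 17th
 * inode of a group has inode_offset 16, so it lives in the second
 * inode-table block (16 / 16 == 1) at byte offset 0 ((16 % 16) * 256)
 * within that block.
 */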

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}

void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT4_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int flags = ei->vfs_inode.i_flags;

	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
	if (flags & S_SYNC)
		ei->i_flags |= EXT4_SYNC_FL;
	if (flags & S_APPEND)
		ei->i_flags |= EXT4_APPEND_FL;
	if (flags & S_IMMUTABLE)
		ei->i_flags |= EXT4_IMMUTABLE_FL;
	if (flags & S_NOATIME)
		ei->i_flags |= EXT4_NOATIME_FL;
	if (flags & S_DIRSYNC)
		ei->i_flags |= EXT4_DIRSYNC_FL;
}

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
			/* i_blocks is counted in file system blocks */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}
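
/*
 * Worked example (added for illustration): with 4KB blocks, i_blkbits is 12,
 * so a huge file's on-disk block count is shifted left by 12 - 9 = 3 - each
 * filesystem block accounts for eight 512-byte sectors in the in-memory
 * inode->i_blocks.
 */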

struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = 0;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);

	ei->i_state_flags = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes -
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync. We set them to currently running transaction
	 * as we cannot be sure that the inode or some of its metadata isn't
	 * part of the transaction - the inode could have been reclaimed and
	 * now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		spin_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		spin_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			__le32 *magic = (void *)raw_inode +
					EXT4_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		}
	} else
		ei->i_extra_isize = 0;

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			inode->i_version |=
			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
		ext4_error(sb, "bad extended attribute block %llu inode #%lu",
			   ei->i_file_acl, inode->i_ino);
		ret = -EIO;
		goto bad_inode;
	} else if (ei->i_flags & EXT4_EXTENTS_FL) {
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		    (S_ISLNK(inode->i_mode) &&
		     !ext4_inode_is_fast_symlink(inode)))
			/* Validate extent which is part of inode */
			ret = ext4_ext_check_inode(inode);
	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		   (S_ISLNK(inode->i_mode) &&
		    !ext4_inode_is_fast_symlink(inode))) {
		/* Validate block references which are part of inode */
		ret = ext4_check_inode_blockref(inode);
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else {
		ret = -EIO;
		ext4_error(inode->i_sb, "bogus i_mode (%o) for inode=%lu",
			   inode->i_mode, inode->i_ino);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

static int ext4_inode_blocks_set(handle_t *handle,
				 struct ext4_inode *raw_inode,
				 struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
		return 0;
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
	} else {
		ei->i_flags |= EXT4_HUGE_FILE_FL;
		/* i_blocks is stored in units of file system blocks */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}
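
/*
 * Note (added for clarity): the three cases above mirror ext4_inode_blocks()
 * earlier in this file. Counts up to 2^32 - 1 sectors fit in i_blocks_lo
 * alone; up to 2^48 - 1 they span the lo/high pair; beyond that the count is
 * converted to filesystem-block units, with EXT4_HUGE_FILE_FL recording the
 * changed unit so the reader knows to shift it back.
 */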

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache. This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			sb->s_dirt = 1;
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_metadata(handle, NULL,
					EXT4_SB(sb)->s_sbh);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
			cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (!err)
		err = rc;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);

	ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return. We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this. The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost. Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (current->flags & PF_MEMALLOC)
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		if (wbc->sync_mode != WB_SYNC_ALL)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		if (wbc->sync_mode == WB_SYNC_ALL)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			ext4_error(inode->i_sb, "IO error syncing inode, "
				   "inode=%lu, block=%llu", inode->i_ino,
				   (unsigned long long)iloc.bh->b_blocknr);
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE)
		dquot_initialize(inode);
	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
				error = -EFBIG;
				goto err_out;
			}
		}
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE &&
	    (attr->ia_size < inode->i_size ||
	     (EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))) {
		handle_t *handle;

		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext4_orphan_add(handle, inode);
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);

		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error) {
				/* Do as much error cleanup as possible */
				handle = ext4_journal_start(inode, 3);
				if (IS_ERR(handle)) {
					ext4_orphan_del(NULL, inode);
					goto err_out;
				}
				ext4_orphan_del(handle, inode);
				ext4_journal_stop(handle);
				goto err_out;
			}
		}
		/* ext4_truncate will clear the flag */
		if ((EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))
			ext4_truncate(inode);
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext4_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long delalloc_blocks;

	inode = dentry->d_inode;
	generic_fillattr(inode, stat);

	/*
	 * We can't update i_blocks if the block allocation is delayed,
	 * otherwise in the case of a system crash before the real block
	 * allocation is done, we will have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation. But so as not to confuse users, stat
	 * will return the blocks that include the delayed allocation
	 * blocks for this file.
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
	return 0;
}

static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
				      int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks,
		 * 2 dindirect blocks and 1 tindirect block
		 */
		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
		return indirects + 3;
	}
	/*
	 * if nrblocks are not contiguous, worst case, each block touches
	 * an indirect block, and each indirect block touches a double
	 * indirect block, plus a triple indirect block
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}
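
/*
 * Worked example (added for illustration, assuming 4KB blocks, so
 * EXT4_ADDR_PER_BLOCK == 1024): mapping a contiguous chunk of 1024 blocks
 * costs at most 1024/1024 + 3 = 4 index blocks, while 16 discontiguous
 * blocks are charged 16 * 2 + 1 = 33 - one indirect and one double-indirect
 * block each, plus a shared triple-indirect block.
 */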

static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify datablocks and index blocks.
 * In the worst case, the index blocks spread over different block groups.
 *
 * If the datablocks are discontiguous, they may spread over different
 * block groups too. If they are contiguous, with flexbg, they could
 * still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks
 */
int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to modify nrblocks?
	 * The "chunk" flag indicates whether nrblocks is physically
	 * contiguous on disk.
	 *
	 * Direct IO and fallocate, which call get_block to allocate
	 * one single extent at a time, can set the "chunk" flag.
	 */
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors need
	 * to be accounted.
	 */
	groups = idxblocks;
	if (chunk)
		groups += 1;
	else
		groups += nrblocks;

	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
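
/*
 * Worked example (added for illustration, assuming 4KB blocks on an
 * indirect-mapped inode): for a contiguous 4-block chunk, idxblocks is
 * 4/1024 + 3 = 3 (integer division), groups is 3 + 1 = 4, and gdpblocks is
 * at most 4 (capped by ngroups and s_gdb_count), so the credit estimate is
 * 3 + 4 + 4 + EXT4_META_TRANS_BLOCKS(sb).
 */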

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when we allocate
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calls
 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
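
/*
 * Usage sketch (added, not in the original): the reserve/dirty pair is
 * normally used exactly as ext4_mark_inode_dirty() below uses it - reserve
 * write access first, modify the in-core inode, then post it:
 */
#if 0
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	/* ... update the in-core inode here ... */
	if (!err)
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
#endif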

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory.  It has the desired
 * effect.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
        struct ext4_iloc iloc;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        static unsigned int mnt_count;
        int err, ret;

        might_sleep();
        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (ext4_handle_valid(handle) &&
            EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
            !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
                /*
                 * We need extra buffer credits since we may write into the EA
                 * block with this same handle.  If journal_extend fails, then
                 * it will only result in a minor loss of functionality for
                 * that inode.  If this is felt to be critical, then e2fsck
                 * should be run to force a large enough s_min_extra_isize.
                 */
                if ((jbd2_journal_extend(handle,
                             EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
                        ret = ext4_expand_extra_isize(inode,
                                                      sbi->s_want_extra_isize,
                                                      iloc, handle);
                        if (ret) {
                                ext4_set_inode_state(inode,
                                                     EXT4_STATE_NO_EXPAND);
                                if (mnt_count !=
                                        le16_to_cpu(sbi->s_es->s_mnt_count)) {
                                        ext4_warning(inode->i_sb,
                                        "Unable to expand inode %lu. Delete"
                                        " some EAs or run e2fsck.",
                                                     inode->i_ino);
                                        mnt_count =
                                          le16_to_cpu(sbi->s_es->s_mnt_count);
                                }
                        }
                }
        }
        if (!err)
                err = ext4_mark_iloc_dirty(handle, inode, &iloc);
        return err;
}
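
/*
 * Editor's aside on jbd2_journal_extend(): it returns 0 when the
 * running handle was grown by the requested number of credits, a
 * positive value when the transaction is too full to extend in place,
 * and a negative errno on error.  That is why the expansion above is
 * simply skipped unless the extend attempt returns 0.  Callers that
 * must have the credits typically fall back to restarting the handle,
 * sketched below.
 */
#if 0
        err = jbd2_journal_extend(handle, nblocks);
        if (err > 0)
                /* too full to extend: commit and start a fresh handle */
                err = jbd2_journal_restart(handle, nblocks);
        if (err)
                goto out;       /* could not obtain the credits */
#endif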

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode)
{
        handle_t *handle;

        handle = ext4_journal_start(inode, 2);
        if (IS_ERR(handle))
                goto out;

        ext4_mark_inode_dirty(handle, inode);

        ext4_journal_stop(handle);
out:
        return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
        struct ext4_iloc iloc;
        int err = 0;

        if (handle) {
                err = ext4_get_inode_loc(inode, &iloc);
                if (!err) {
                        BUFFER_TRACE(iloc.bh, "get_write_access");
                        err = jbd2_journal_get_write_access(handle, iloc.bh);
                        if (!err)
                                err = ext4_handle_dirty_metadata(handle,
                                                                 NULL,
                                                                 iloc.bh);
                        brelse(iloc.bh);
                }
        }
        ext4_std_error(inode->i_sb, err);
        return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
        journal_t *journal;
        handle_t *handle;
        int err;

        /*
         * We have to be very careful here: changing a data block's
         * journaling status dynamically is dangerous.  If we write a
         * data block to the journal, change the status and then delete
         * that block, we risk forgetting to revoke the old log record
         * from the journal and so a subsequent replay can corrupt data.
         * So, first we make sure that the journal is empty and that
         * nobody is changing anything.
         */

        journal = EXT4_JOURNAL(inode);
        if (!journal)
                return 0;
        if (is_journal_aborted(journal))
                return -EROFS;

        jbd2_journal_lock_updates(journal);
        jbd2_journal_flush(journal);

        /*
         * OK, there are no updates running now, and all cached data is
         * synced to disk.  We are now in a completely consistent state
         * which doesn't have anything in the journal, and we know that
         * no filesystem updates are running, so it is safe to modify
         * the inode's in-core data-journaling state flag now.
         */

        if (val)
                EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
        else
                EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
        ext4_set_aops(inode);

        jbd2_journal_unlock_updates(journal);

        /* Finally we can mark the inode as dirty. */

        handle = ext4_journal_start(inode, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = ext4_mark_inode_dirty(handle, inode);
        ext4_handle_sync(handle);
        ext4_journal_stop(handle);
        ext4_std_error(inode->i_sb, err);

        return err;
}
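
/*
 * Illustrative caller sketch (an editor's addition, paraphrased from
 * memory of the EXT4_IOC_SETFLAGS path in fs/ext4/ioctl.c, reached when
 * userspace toggles data journalling via chattr +j/-j).  Simplified:
 * the real path also checks permissions and holds i_mutex; the local
 * names flags/oldflags/flags_out stand in for the real ones.
 */
#if 0
        if ((flags ^ oldflags) & EXT4_JOURNAL_DATA_FL) {
                err = ext4_change_inode_journal_flag(inode,
                                flags & EXT4_JOURNAL_DATA_FL);
                if (err)
                        goto flags_out;
        }
#endif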

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
        return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        loff_t size;
        unsigned long len;
        int ret = -EINVAL;
        void *fsdata;
        struct file *file = vma->vm_file;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;

        /*
         * Get i_alloc_sem to stop truncates messing with the inode. We cannot
         * get i_mutex because we are already holding mmap_sem.
         */
        down_read(&inode->i_alloc_sem);
        size = i_size_read(inode);
        if (page->mapping != mapping || size <= page_offset(page)
            || !PageUptodate(page)) {
                /* page got truncated from under us? */
                goto out_unlock;
        }
        ret = 0;
        if (PageMappedToDisk(page))
                goto out_unlock;

        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
        else
                len = PAGE_CACHE_SIZE;

        lock_page(page);
        /*
         * Return if we have all the buffers mapped.  This avoids the
         * need to call write_begin/write_end, which does a
         * journal_start/journal_stop and can therefore block for a
         * long time.
         */
        if (page_has_buffers(page)) {
                if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
                                       ext4_bh_unmapped)) {
                        unlock_page(page);
                        goto out_unlock;
                }
        }
        unlock_page(page);
        /*
         * OK, we need to fill the hole...  Do write_begin/write_end to do
         * the block allocation/reservation.  We are not holding
         * inode->i_mutex here, which allows parallel write_begin/write_end
         * calls; lock_page prevents that from happening on the same page,
         * though.
         */
        ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
                        len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
        if (ret < 0)
                goto out_unlock;
        ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
                        len, len, page, fsdata);
        if (ret < 0)
                goto out_unlock;
        ret = 0;
out_unlock:
        if (ret)
                ret = VM_FAULT_SIGBUS;
        up_read(&inode->i_alloc_sem);
        return ret;
}
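
/*
 * Wiring sketch (an editor's addition): ext4_page_mkwrite() is hooked
 * up through the vm_operations_struct installed by ext4's mmap
 * implementation in fs/ext4/file.c, roughly as below; read faults are
 * served by the generic filemap_fault().
 */
#if 0
static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = ext4_page_mkwrite,
};
#endif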