
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>
/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}
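
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * the verify/set pair above is meant to bracket extent-block I/O.  A
 * hypothetical caller verifies right after reading a node and refreshes
 * the checksum right before writing it back, roughly:
 *
 *	eh = ext_block_hdr(bh);
 *	if (!ext4_extent_block_csum_verify(inode, eh))
 *		return -EFSBADCRC;		// bad on-disk checksum
 *	// ... modify the node ...
 *	ext4_extent_block_csum_set(inode, eh);	// recompute before writeback
 *
 * This is exactly the pattern followed by __ext4_ext_dirty() and
 * __read_extent_tree_block() below.
 */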
static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits >= needed)
		return 0;
	/*
	 * If we need to extend the journal get a few extra blocks
	 * while we're at it for efficiency's sake.
	 */
	needed += 3;
	err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}
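
/*
 * Illustrative sketch (editor's addition): a caller of
 * ext4_ext_truncate_extend_restart() is expected to treat -EAGAIN as
 * "the transaction was restarted, cached state may be stale, retry":
 *
 *	err = ext4_ext_truncate_extend_restart(handle, inode, needed);
 *	if (err == -EAGAIN)
 *		goto again;	// journal restarted: redo the lookup
 *	else if (err)
 *		goto out;	// hard error
 *
 * The labels `again` and `out` are hypothetical names for the sketch.
 */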
/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
		     struct inode *inode, struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file. However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space. Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
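
/*
 * Worked example (editor's addition): with a 4 KiB block size, and
 * given that the on-disk header, extent and index structures are each
 * 12 bytes, a full tree node holds
 *
 *	(4096 - 12) / 12 = 340
 *
 * extents (or indexes), while the 60-byte i_data area in the inode
 * holds (60 - 12) / 12 = 4 root-level entries.  The AGGRESSIVE_TEST
 * clamps above shrink these capacities so tree splits can be
 * exercised with small files.
 */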
static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
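
/*
 * Worked example (editor's addition): with 4 KiB blocks, idxs is 340
 * (see the capacity arithmetic above).  While a delayed-allocation
 * region grows contiguously, most calls return 0; every 340th block
 * charges one new leaf index block, every 340^2-th block additionally
 * charges a level-2 index block, and so on.  A non-contiguous block
 * falls through to the worst-case charge of ext_depth(inode) + 1.
 */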
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
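
/*
 * Example (editor's addition): ext4_lblk_t is a 32-bit type, so with
 * lblock = 0xfffffff0 and len = 0x20 the sum wraps around to 0x10,
 * which is <= lblock and the extent is rejected; len = 0 fails the
 * same test.  That is why a single comparison covers both the
 * zero-length and the wrap-around cases listed in the comment above.
 */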
static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	unsigned short entries;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
		ext4_fsblk_t pblock = 0;
		ext4_lblk_t lblock = 0;
		ext4_lblk_t prev = 0;
		int len = 0;

		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			len = ext4_ext_get_actual_len(ext);
			if ((lblock <= prev) && prev) {
				pblock = ext4_ext_pblock(ext);
				es->s_last_error_block = cpu_to_le64(pblock);
				return 0;
			}
			ext++;
			entries--;
			prev = lblock + len - 1;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			 "pblk %llu bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 (unsigned long long) pblk, error_msg,
			 le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, ext4_fsblk_t pblk, int depth,
			 int flags)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = bh_submit_read(bh);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode,
			       ext_block_hdr(bh), depth, pblk);
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
		ext4_lblk_t prev = 0;
		int i;

		for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
			unsigned int status = EXTENT_STATUS_WRITTEN;
			ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
			int len = ext4_ext_get_actual_len(ex);

			if (prev && (prev != lblk))
				ext4_es_cache_extent(inode, prev,
						     lblk - prev, ~0,
						     EXTENT_STATUS_HOLE);

			if (ext4_ext_is_unwritten(ex))
				status = EXTENT_STATUS_UNWRITTEN;
			ext4_es_cache_extent(inode, lblk, len,
					     ext4_ext_pblock(ex), status);
			prev = lblk + len;
		}
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, pblk, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),	\
				 (depth), (flags))
/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	/* Don't cache anything if there are no external extent blocks */
	if (depth == 0)
		goto out;
	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode,
					    ext4_idx_pblock(path[i].p_idx++),
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}
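
/*
 * Usage note (editor's addition): this walk backs the
 * EXT4_IOC_PRECACHE_EXTENTS ioctl; a hypothetical userspace trigger is
 * simply:
 *
 *	ioctl(fd, EXT4_IOC_PRECACHE_EXTENTS);
 *
 * The traversal is an iterative depth-first scan: path[] acts as an
 * explicit stack, i is the current level, and each level's p_idx
 * cursor records how far that index block has been consumed.
 */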
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
			    ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(" []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_unwritten(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
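
/*
 * Worked example (editor's addition): for index entries covering
 * logical blocks {0, 100, 200}, a search for block 150 narrows l/r
 * until l points just past the entry for 100, so p_idx = l - 1
 * selects it:
 *
 *	ei_block:    0    100    200
 *	                   ^ p_idx (rightmost entry with ei_block <= 150)
 *
 * The search deliberately starts at EXT_FIRST_INDEX(eh) + 1: the first
 * entry is the unconditional fallback when block precedes every other
 * entry in the node.
 */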
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug(" -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_unwritten(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
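
/*
 * Illustrative sketch (editor's addition): both searches above are the
 * same "rightmost entry with key <= block" scan.  Stripped of the
 * on-disk types, the algorithm is:
 */
#if 0	/* example only, not compiled */
static int find_le(const unsigned int *blocks, int n, unsigned int key)
{
	int l = 1, r = n - 1;	/* entry 0 is the unconditional fallback */

	while (l <= r) {
		int m = l + (r - l) / 2;

		if (key < blocks[m])
			r = m - 1;	/* answer is strictly left of m */
		else
			l = m + 1;	/* m is a candidate; look right */
	}
	return l - 1;	/* rightmost i with blocks[i] <= key, or 0 */
}
#endif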
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	return 0;
}
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}
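
/*
 * Usage sketch (editor's addition): a typical lookup over this API,
 * with hypothetical local names:
 */
#if 0	/* example only, not compiled */
	struct ext4_ext_path *path;
	struct ext4_extent *ex;

	path = ext4_find_extent(inode, lblk, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[ext_depth(inode)].p_ext;	/* NULL on an empty leaf */
	/* ... use ex ... */
	ext4_ext_drop_refs(path);		/* release the held buffers */
	kfree(path);
#endif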
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
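
/*
 * Worked example (editor's addition): inserting logical block L with
 * A < L < B shifts the tail of the index array right by one slot via
 * the memmove() above, then writes the new entry into the gap:
 *
 *	before:  | A | B | C | . |
 *	after:   | A | L | B | C |
 *
 * eh_entries is only bumped after the new entry is fully written, and
 * the EXT_MAX_INDEX/EXT_LAST_INDEX checks keep the write in bounds.
 */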
  890. /*
  891. * ext4_ext_split:
  892. * inserts new subtree into the path, using free index entry
  893. * at depth @at:
  894. * - allocates all needed blocks (new leaf and all intermediate index blocks)
  895. * - makes decision where to split
  896. * - moves remaining extents and index entries (right to the split point)
  897. * into the newly allocated blocks
  898. * - initializes subtree
  899. */
  900. static int ext4_ext_split(handle_t *handle, struct inode *inode,
  901. unsigned int flags,
  902. struct ext4_ext_path *path,
  903. struct ext4_extent *newext, int at)
  904. {
  905. struct buffer_head *bh = NULL;
  906. int depth = ext_depth(inode);
  907. struct ext4_extent_header *neh;
  908. struct ext4_extent_idx *fidx;
  909. int i = at, k, m, a;
  910. ext4_fsblk_t newblock, oldblock;
  911. __le32 border;
  912. ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
  913. int err = 0;
  914. /* make decision: where to split? */
  915. /* FIXME: now decision is simplest: at current extent */
  916. /* if current leaf will be split, then we should use
  917. * border from split point */
  918. if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
  919. EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
  920. return -EFSCORRUPTED;
  921. }
  922. if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
  923. border = path[depth].p_ext[1].ee_block;
  924. ext_debug("leaf will be split."
  925. " next leaf starts at %d\n",
  926. le32_to_cpu(border));
  927. } else {
  928. border = newext->ee_block;
  929. ext_debug("leaf will be added."
  930. " next leaf starts at %d\n",
  931. le32_to_cpu(border));
  932. }
  933. /*
  934. * If error occurs, then we break processing
  935. * and mark filesystem read-only. index won't
  936. * be inserted and tree will be in consistent
  937. * state. Next mount will repair buffers too.
  938. */
  939. /*
  940. * Get array to track all allocated blocks.
  941. * We need this to handle errors and free blocks
  942. * upon them.
  943. */
  944. ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
  945. if (!ablocks)
  946. return -ENOMEM;
  947. /* allocate all needed blocks */
  948. ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
  949. for (a = 0; a < depth - at; a++) {
  950. newblock = ext4_ext_new_meta_block(handle, inode, path,
  951. newext, &err, flags);
  952. if (newblock == 0)
  953. goto cleanup;
  954. ablocks[a] = newblock;
  955. }
	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}
	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
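/*
 * For instance, a full depth-0 tree (all extents inline in the inode)
 * becomes a depth-1 tree: the root's entries move into the newly
 * allocated block, and the root is rewritten as an index block with a
 * single entry pointing at that block.
 */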
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int mb_flags,
				    unsigned int gb_flags,
				    struct ext4_ext_path **ppath,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		/* subtract from p_depth to get proper eh_depth */
		bh = read_extent_tree_block(inode, block,
					    path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	*ret_ex = ex;
	if (bh)
		put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
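/*
 * For example (illustrative): if the leaf's last extent ends at block 59
 * and the nearest ancestor index has a following entry keyed at block
 * 200, this returns 200: the index key is trusted as the start of the
 * next allocated range without reading the leaf it points to.
 */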
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext &&
			    path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EFSCORRUPTED;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;
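	/*
	 * The new border only has to propagate further up while the index
	 * updated at each level is itself the first entry of its block;
	 * otherwise the ancestors already carry a smaller key and the loop
	 * below stops early.
	 */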
	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len;

	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
		return 0;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
		return 0;
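	/*
	 * Concretely, EXT_INIT_MAX_LEN is 1 << 15 == 32768 blocks, so e.g.
	 * two adjacent written extents of 20000 blocks each stay unmerged:
	 * a combined ee_len of 40000 would spill into the unwritten bit.
	 */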
	/*
	 * The check for IO to unwritten extent is somewhat racy as we
	 * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after
	 * dropping i_data_sem. But reserved blocks should save us in that
	 * case.
	 */
	if (ext4_ext_is_unwritten(ex1) &&
	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
	     (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0, unwritten;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		unwritten = ext4_ext_is_unwritten(ex);
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (unwritten)
			ext4_ext_mark_unwritten(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	path[1].p_maxdepth = path[0].p_maxdepth;
	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

/*
 * This function tries to merge the @ex extent to the neighbouring extents
 * in the tree, then checks whether the whole tree can be collapsed into
 * the inode.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
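/*
 * For example (ignoring bigalloc cluster rounding): if "newext" covers
 * blocks [100, 119] but an existing extent already starts at block 110,
 * newext->ee_len is trimmed to 10 so that it covers [100, 109] only,
 * and 1 is returned.
 */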
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 = EXT4_LBLK_CMASK(sbi, b2);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path **ppath,
				struct ext4_extent *newext, int gb_flags)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	int mb_flags = 0, unwritten;

	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EFSCORRUPTED;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}

	/* try to insert block into found extent and return */
	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {

		/*
		 * Try to see whether we should rather test the extent on
		 * right from ex, or from the left of ex. This is because
		 * ext4_find_extent() can return either extent on the
		 * left, or on the right from the searched position. This
		 * will make merging more effective.
		 */
		if (ex < EXT_LAST_EXTENT(eh) &&
		    (le32_to_cpu(ex->ee_block) +
		    ext4_ext_get_actual_len(ex) <
		    le32_to_cpu(newext->ee_block))) {
			ex += 1;
			goto prepend;
		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
			   (le32_to_cpu(newext->ee_block) +
			   ext4_ext_get_actual_len(newext) <
			   le32_to_cpu(ex->ee_block)))
			ex -= 1;

		/* Try to append newex to the ex */
		if (ext4_can_extents_be_merged(inode, ex, newext)) {
			ext_debug("append [%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_unwritten(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;
			unwritten = ext4_ext_is_unwritten(ex);
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}

prepend:
		/* Try to prepend newex to the ex */
		if (ext4_can_extents_be_merged(inode, newext, ex)) {
			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_is_unwritten(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_unwritten(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;

			unwritten = ext4_ext_is_unwritten(ex);
			ex->ee_block = newext->ee_block;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}
	}
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_find_extent(inode, next, NULL, 0);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		mb_flags |= EXT4_MB_USE_RESERVED;
	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
				       ppath, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_unwritten(newext),
				ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug("insert %u:%llu:[%d]%d after: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
			nearex++;
		} else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug("insert %u:%llu:[%d]%d before: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug("insert %u:%llu:[%d]%d: "
					"move %d extents from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_unwritten(newext),
					ext4_ext_get_actual_len(newext),
					len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents */
	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + path->p_depth);

cleanup:
	ext4_ext_drop_refs(npath);
	kfree(npath);
	return err;
}

static int ext4_fill_fiemap_extents(struct inode *inode,
				    ext4_lblk_t block, ext4_lblk_t num,
				    struct fiemap_extent_info *fieinfo)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	struct extent_status es;
	ext4_lblk_t next, next_del, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int exists, depth = 0, err = 0;
	unsigned int flags = 0;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;

	while (block < last && block != EXT_MAX_BLOCKS) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);

		path = ext4_find_extent(inode, block, &path, 0);
		if (IS_ERR(path)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EFSCORRUPTED;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		flags = 0;
		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			es.es_lblk = start;
			es.es_len = end - start;
			es.es_pblk = 0;
		} else {
			es.es_lblk = le32_to_cpu(ex->ee_block);
			es.es_len = ext4_ext_get_actual_len(ex);
			es.es_pblk = ext4_ext_pblock(ex);
			if (ext4_ext_is_unwritten(ex))
				flags |= FIEMAP_EXTENT_UNWRITTEN;
		}

		/*
		 * Find delayed extent and update es accordingly. We call
		 * it even in !exists case to find out whether es is the
		 * last existing extent or not.
		 */
		next_del = ext4_find_delayed_extent(inode, &es);
		if (!exists && next_del) {
			exists = 1;
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		}
		up_read(&EXT4_I(inode)->i_data_sem);

		if (unlikely(es.es_len == 0)) {
			EXT4_ERROR_INODE(inode, "es.es_len == 0");
			err = -EFSCORRUPTED;
			break;
		}

		/*
		 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
		 * We need to check next == EXT_MAX_BLOCKS because an extent
		 * can carry both unwritten and delayed status: when a
		 * delayed-allocated extent is later allocated by fallocate,
		 * the status tree tracks both states in a single extent.
		 * So we could return an unwritten and delayed extent whose
		 * block is equal to 'next'.
		 */
		if (next == next_del && next == EXT_MAX_BLOCKS) {
			flags |= FIEMAP_EXTENT_LAST;
			if (unlikely(next_del != EXT_MAX_BLOCKS ||
				     next != EXT_MAX_BLOCKS)) {
				EXT4_ERROR_INODE(inode,
						 "next extent == %u, next "
						 "delalloc extent = %u",
						 next, next_del);
				err = -EFSCORRUPTED;
				break;
			}
		}

		if (exists) {
			err = fiemap_fill_next_extent(fieinfo,
				(__u64)es.es_lblk << blksize_bits,
				(__u64)es.es_pblk << blksize_bits,
				(__u64)es.es_len << blksize_bits,
				flags);
			if (err < 0)
				break;
			if (err == 1) {
				err = 0;
				break;
			}
		}

		block = es.es_lblk + es.es_len;
	}

	ext4_ext_drop_refs(path);
	kfree(path);
	return err;
}

/*
 * ext4_ext_determine_hole - determine hole around given block
 * @inode:	inode we lookup in
 * @path:	path in extent tree to @lblk
 * @lblk:	pointer to logical block around which we want to determine hole
 *
 * Determine hole length (and start if easily possible) around given logical
 * block. We don't try too hard to find the beginning of the hole, but when
 * @path already points to the extent before @lblk, we provide it.
 *
 * The function returns the length of a hole starting at @lblk. We update @lblk
 * to the beginning of the hole if we managed to find it.
 */
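/*
 * For example (illustrative): with extents covering blocks [0, 9] and
 * [20, 29], looking up *lblk == 12 falls past the first extent, so
 * *lblk is moved to 10 and the returned hole length is 10 blocks.
 */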
static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
					   struct ext4_ext_path *path,
					   ext4_lblk_t *lblk)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	ext4_lblk_t len;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		*lblk = 0;
		len = EXT_MAX_BLOCKS;
	} else if (*lblk < le32_to_cpu(ex->ee_block)) {
		len = le32_to_cpu(ex->ee_block) - *lblk;
	} else if (*lblk >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;

		*lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		next = ext4_ext_next_allocated_block(path);
		BUG_ON(next == *lblk);
		len = next - *lblk;
	} else {
		BUG();
	}
	return len;
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
			  ext4_lblk_t hole_len)
{
	struct extent_status es;

	ext4_es_find_delayed_extent_range(inode, hole_start,
					  hole_start + hole_len - 1, &es);
	if (es.es_len) {
		/* Is there a delayed extent containing lblock? */
		if (es.es_lblk <= hole_start)
			return;
		hole_len = min(es.es_lblk - hole_start, hole_len);
	}
	ext_debug(" -> %u:%u\n", hole_start, hole_len);
	ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
			      EXTENT_STATUS_HOLE);
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path, int depth)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	depth--;
	path = path + depth;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EFSCORRUPTED;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;

	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
		len *= sizeof(struct ext4_extent_idx);
		memmove(path->p_idx, path->p_idx + 1, len);
	}

	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	trace_ext4_ext_rm_idx(inode, leaf);

	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
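	/*
	 * If the removed entry was the first one in its block, the key
	 * visible to the ancestors has changed: copy the new first key
	 * upward for as long as each level's entry is itself the first
	 * one in its index block.
	 */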
	while (--depth >= 0) {
		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
			break;
		path--;
		err = ext4_ext_get_access(handle, inode, path);
		if (err)
			break;
		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
		err = ext4_ext_dirty(handle, inode, path);
		if (err)
			break;
	}
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * into the extent tree.
 * When the actual path is passed, the caller should calculate credits
 * under i_data_sem.
 */
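/*
 * Roughly: if the target leaf still has room, only that leaf plus the
 * usual bitmap/group-descriptor metadata need updating; if the leaf is
 * full, ext4_chunk_trans_blocks() below also accounts for the index
 * blocks that a split of the tree may touch.
 */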
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 * There is some space in the leaf, so no need
			 * to account for the leaf block credit;
			 *
			 * bitmaps and block group descriptor blocks
			 * and other metadata blocks still need to be
			 * accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to change/allocate to add @extents extents?
 *
 * If we add a single extent, then in the worst case, each tree level
 * index/leaf needs to be changed in case of the tree split.
 *
 * If more extents are inserted, they could cause the whole tree split more
 * than once, but this is really rare.
 */
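/*
 * Example (illustrative): for a depth-2 tree, inserting a single extent
 * is charged 2 * 2 == 4 index/leaf blocks (each level may be touched
 * once by a split); inserting several extents is charged 2 * 3 == 6 to
 * allow for the rare repeated split.
 */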
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
	int index;
	int depth;

	/* If we are converting the inline data, only one is needed here. */
	if (ext4_has_inline_data(inode))
		return 1;

	depth = ext_depth(inode);

	if (extents <= 1)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}

static inline int get_default_free_blocks_flags(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
	else if (ext4_should_journal_data(inode))
		return EXT4_FREE_BLOCKS_FORGET;
	return 0;
}

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      long long *partial_cluster,
			      ext4_lblk_t from, ext4_lblk_t to)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	ext4_fsblk_t pblk;
	int flags = get_default_free_blocks_flags(inode);

	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent.  Instead, we make a note
	 * that we tried freeing the cluster, and check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
	 */
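	/*
	 * Illustrative bigalloc example: with a cluster ratio of 16, an
	 * extent beginning in the middle of cluster N leaves cluster N
	 * untouched here; whether that cluster is freed is decided only
	 * once the whole truncate/punch range has been scanned.
	 */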
	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;

	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
	/*
	 * If we have a partial cluster, and it's different from the
	 * cluster of the last block, we need to explicitly free the
	 * partial cluster here.
	 */
	pblk = ext4_ext_pblock(ex) + ee_len - 1;
	if (*partial_cluster > 0 &&
	    *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, *partial_cluster),
				 sbi->s_cluster_ratio, flags);
		*partial_cluster = 0;
	}

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		long long first_cluster;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		pblk = ext4_ext_pblock(ex) + ee_len - num;
		/*
		 * Usually we want to free partial cluster at the end of the
		 * extent, except for the situation when the cluster is still
		 * used by any other extent (partial_cluster is negative).
		 */
		if (*partial_cluster < 0 &&
		    *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
			flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;

		ext_debug("free last %u blocks starting %llu partial %lld\n",
			  num, pblk, *partial_cluster);
		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
		/*
		 * If the block range to be freed didn't start at the
		 * beginning of a cluster, and we removed the entire
		 * extent and the cluster is not used by any other extent,
		 * save the partial cluster here, since we might need to
		 * delete if we determine that the truncate or punch hole
		 * operation has removed all of the blocks in the cluster.
		 * If that cluster is used by another extent, preserve its
		 * negative value so it isn't freed later on.
		 *
		 * If the whole extent wasn't freed, we've reached the
		 * start of the truncated/punched region and have finished
		 * removing blocks.  If there's a partial cluster here it's
		 * shared with the remainder of the extent and is no longer
		 * a candidate for removal.
		 */
		if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
			first_cluster = (long long) EXT4_B2C(sbi, pblk);
			if (first_cluster != -*partial_cluster)
				*partial_cluster = first_cluster;
		} else {
			*partial_cluster = 0;
		}
	} else
		ext4_error(sbi->s_sb, "strange request: removal(2) "
			   "%u-%u from %u:%u",
			   from, to, le32_to_cpu(ex->ee_block), ee_len);
	return 0;
}

/*
 * ext4_ext_rm_leaf() Removes the extents associated with the
 * blocks appearing between "start" and "end".  Both "start"
 * and "end" must appear in the same extent or EFSCORRUPTED is returned.
 *
 * @handle: The journal handle
 * @inode:  The file's inode
 * @path:   The path to the leaf
 * @partial_cluster: The cluster which we'll have to free if all extents
 *                   have been released from it.  However, if this value is
 *                   negative, it's a cluster just to the right of the
 *                   punched region and it must not be freed.
 * @start:  The first block to remove
 * @end:    The last block to remove
 */
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path,
		 long long *partial_cluster,
		 ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned unwritten = 0;
	struct ext4_extent *ex;
	ext4_fsblk_t pblk;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf to %u\n", start, end);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}
	/* find where to start removing */
	ex = path[depth].p_ext;
	if (!ex)
		ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_unwritten(ex))
			unwritten = 1;
		else
			unwritten = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			  unwritten, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block+ex_ee_len - 1 < end ?
			ex_ee_block+ex_ee_len - 1 : end;

		ext_debug("  border %u:%u\n", a, b);

		/* If this extent is beyond the end of the hole, skip it */
		if (end < ex_ee_block) {
			/*
			 * We're going to skip this extent and move to another,
			 * so note that its first cluster is in use to avoid
			 * freeing it when removing blocks.  Eventually, the
			 * right edge of the truncated/punched region will
			 * be just to the left.
			 */
			if (sbi->s_cluster_ratio > 1) {
				pblk = ext4_ext_pblock(ex);
				*partial_cluster =
					-(long long) EXT4_B2C(sbi, pblk);
			}
			ex--;
			ex_ee_block = le32_to_cpu(ex->ee_block);
			ex_ee_len = ext4_ext_get_actual_len(ex);
			continue;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			EXT4_ERROR_INODE(inode,
					 "can not handle truncate %u:%u "
					 "on extent %u:%u",
					 start, end, ex_ee_block,
					 ex_ee_block + ex_ee_len - 1);
			err = -EFSCORRUPTED;
			goto out;
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			num = a - ex_ee_block;
		} else {
			/* remove whole extent: excellent! */
			num = 0;
		}
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
					 a, b);
		if (err)
			goto out;

		if (num == 0)
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);

		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark unwritten if all the blocks in the
		 * extent have been removed.
		 */
		if (unwritten && num)
			ext4_ext_mark_unwritten(ex);
		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we don't have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		}

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/*
	 * If there's a partial cluster and at least one extent remains in
	 * the leaf, free the partial cluster if it isn't shared with the
	 * current extent.  If it is shared with the current extent
	 * we zero partial_cluster because we've reached the start of the
	 * truncated/punched region and we're done removing blocks.
	 */
	if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
		pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
		if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
			ext4_free_blocks(handle, inode, NULL,
					 EXT4_C2B(sbi, *partial_cluster),
					 sbi->s_cluster_ratio,
					 get_default_free_blocks_flags(inode));
		}
		*partial_cluster = 0;
	}

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path, depth);

out:
	return err;
}

/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
  2500. int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
  2501. ext4_lblk_t end)
  2502. {
  2503. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  2504. int depth = ext_depth(inode);
  2505. struct ext4_ext_path *path = NULL;
  2506. long long partial_cluster = 0;
  2507. handle_t *handle;
  2508. int i = 0, err = 0;
  2509. ext_debug("truncate since %u to %u\n", start, end);
  2510. /* probably first extent we're gonna free will be last in block */
  2511. handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
  2512. if (IS_ERR(handle))
  2513. return PTR_ERR(handle);
  2514. again:
  2515. trace_ext4_ext_remove_space(inode, start, end, depth);
  2516. /*
  2517. * Check if we are removing extents inside the extent tree. If that
  2518. * is the case, we are going to punch a hole inside the extent tree
  2519. * so we have to check whether we need to split the extent covering
  2520. * the last block to remove so we can easily remove the part of it
  2521. * in ext4_ext_rm_leaf().
  2522. */
  2523. if (end < EXT_MAX_BLOCKS - 1) {
  2524. struct ext4_extent *ex;
  2525. ext4_lblk_t ee_block, ex_end, lblk;
  2526. ext4_fsblk_t pblk;
  2527. /* find extent for or closest extent to this block */
  2528. path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
  2529. if (IS_ERR(path)) {
  2530. ext4_journal_stop(handle);
  2531. return PTR_ERR(path);
  2532. }
  2533. depth = ext_depth(inode);
  2534. /* Leaf not may not exist only if inode has no blocks at all */
  2535. ex = path[depth].p_ext;
  2536. if (!ex) {
  2537. if (depth) {
  2538. EXT4_ERROR_INODE(inode,
  2539. "path[%d].p_hdr == NULL",
  2540. depth);
  2541. err = -EFSCORRUPTED;
  2542. }
  2543. goto out;
  2544. }
		ee_block = le32_to_cpu(ex->ee_block);
		ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;

		/*
		 * See if the last block is inside the extent, if so split
		 * the extent at 'end' block so we can easily remove the
		 * tail of the first part of the split extent in
		 * ext4_ext_rm_leaf().
		 */
		if (end >= ee_block && end < ex_end) {

			/*
			 * If we're going to split the extent, note that
			 * the cluster containing the block after 'end' is
			 * in use to avoid freeing it when removing blocks.
			 * The block after 'end' sits at logical offset
			 * end + 1 - ee_block within the extent.
			 */
			if (sbi->s_cluster_ratio > 1) {
				pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
				partial_cluster =
					-(long long) EXT4_B2C(sbi, pblk);
			}

			/*
			 * Split the extent in two so that 'end' is the last
			 * block in the first new extent. Also we should not
			 * fail removing space due to ENOSPC so try to use
			 * reserved block if that happens.
			 */
			err = ext4_force_split_extent_at(handle, inode, &path,
							 end + 1, 1);
			if (err < 0)
				goto out;

		} else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
			/*
			 * If there's an extent to the right its first cluster
			 * contains the immediate right boundary of the
			 * truncated/punched region. Set partial_cluster to
			 * its negative value so it won't be freed if shared
			 * with the current extent. The end < ee_block case
			 * is handled in ext4_ext_rm_leaf().
			 */
			lblk = ex_end + 1;
			err = ext4_ext_search_right(inode, path, &lblk, &pblk,
						    &ex);
			if (err)
				goto out;
			if (pblk)
				partial_cluster =
					-(long long) EXT4_B2C(sbi, pblk);
		}
	}
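	/*
	 * Illustrative note (not from the original source): with
	 * s_cluster_ratio == 4, an extent mapping logical 100..107 to
	 * physical 200..207, and end == 103, the block after 'end' is
	 * physical 204, so partial_cluster above becomes
	 * -(long long) EXT4_B2C(sbi, 204) == -51. The negative value tells
	 * ext4_ext_rm_leaf() that cluster 51 extends past the punched
	 * region and must not be freed even if removed blocks share it.
	 */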
	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	if (path) {
		int k = i = depth;
		while (--k > 0)
			path[k].p_block =
				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
	} else {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
			       GFP_NOFS);
		if (path == NULL) {
			ext4_journal_stop(handle);
			return -ENOMEM;
		}
		path[0].p_maxdepth = path[0].p_depth = depth;
		path[0].p_hdr = ext_inode_hdr(inode);
		i = 0;

		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
			err = -EFSCORRUPTED;
			goto out;
		}
	}
	err = 0;
	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial_cluster, start,
					       end);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, look at the next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
			  i, EXT_FIRST_INDEX(path[i].p_hdr),
			  path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = read_extent_tree_block(inode,
				ext4_idx_pblock(path[i].p_idx), depth - i - 1,
				EXT4_EX_NOCACHE);
			if (IS_ERR(bh)) {
				/* should we reset i_size? */
				err = PTR_ERR(bh);
				break;
			}
			/* Yield here to deal with large extent trees.
			 * Should be a no-op if we did IO above. */
			cond_resched();
			if (WARN_ON(i + 1 > depth)) {
				err = -EFSCORRUPTED;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must already be prepared by the
				 * earlier leaf removal */
				err = ext4_ext_rm_idx(handle, inode, path, i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}
	trace_ext4_ext_remove_space_done(inode, start, end, depth,
					 partial_cluster, path->p_hdr->eh_entries);

	/*
	 * If we still have something in the partial cluster and we have removed
	 * even the first extent, then we should free the blocks in the partial
	 * cluster as well. (This code will only run when there are no leaves
	 * to the immediate left of the truncated/punched region.)
	 */
	if (partial_cluster > 0 && err == 0) {
		/* don't zero partial_cluster since it's not used afterwards */
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, partial_cluster),
				 sbi->s_cluster_ratio,
				 get_default_free_blocks_flags(inode));
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	path = NULL;
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}
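/*
 * For reference (not part of the original file): ext4's truncate path
 * typically invokes the function above as
 *
 *	ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
 *
 * removing everything from last_block to the end of the file's logical
 * address space, while hole punching passes the first and last blocks
 * of the punched range instead.
 */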
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (ext4_has_feature_extents(sb)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled"
#ifdef AGGRESSIVE_TEST
		       ", aggressive tests"
#endif
#ifdef CHECK_BINSEARCH
		       ", check binsearch"
#endif
#ifdef EXTENTS_STATS
		       ", stats"
#endif
		       "\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}
/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!ext4_has_feature_extents(sb))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}
static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t  ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block  = le32_to_cpu(ex->ee_block);
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}
/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);
	return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
				  ee_len);
}
/*
 * ext4_split_extent_at() splits an extent at the given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flags: indicates if the extent could be zeroed out if split fails,
 *		 and the states (init or unwritten) of new extents.
 * @flags: flags used to insert new extent to extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
 * states of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed, just mark the extent.
 *
 * return 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex, zero_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));

	ext_debug("ext4_split_extent_at: inode %lu, logical "
		"block %llu\n", inode->i_ino, (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
	BUG_ON(!ext4_ext_is_unwritten(ex) &&
	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
			     EXT4_EXT_MARK_UNWRIT1 |
			     EXT4_EXT_MARK_UNWRIT2));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with
		 * then we just change the state of the extent, and splitting
		 * is not needed.
		 */
		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
			ext4_ext_mark_unwritten(ex);
		else
			ext4_ext_mark_initialized(ex);

		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
			ext4_ext_try_to_merge(handle, inode, path, ex);

		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		goto out;
	}

	/* case a */
	memcpy(&orig_ex, ex, sizeof(orig_ex));
	ex->ee_len = cpu_to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
		ext4_ext_mark_unwritten(ex);

	/*
	 * path may lead to new leaf, not to original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
	err = ext4_ext_dirty(handle, inode, path + depth);
	if (err)
		goto fix_extent_len;

	ex2 = &newex;
	ex2->ee_block = cpu_to_le32(split);
	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
		ext4_ext_mark_unwritten(ex2);

	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
			if (split_flag & EXT4_EXT_DATA_VALID1) {
				err = ext4_ext_zeroout(inode, ex2);
				zero_ex.ee_block = ex2->ee_block;
				zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(ex2));
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex2));
			} else {
				err = ext4_ext_zeroout(inode, ex);
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(ex));
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
			}
		} else {
			err = ext4_ext_zeroout(inode, &orig_ex);
			zero_ex.ee_block = orig_ex.ee_block;
			zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(&orig_ex));
			ext4_ext_store_pblock(&zero_ex,
					      ext4_ext_pblock(&orig_ex));
		}

		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_len = cpu_to_le16(ee_len);
		ext4_ext_try_to_merge(handle, inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		if (err)
			goto fix_extent_len;

		/* update extent status tree */
		err = ext4_zeroout_es(inode, &zero_ex);

		goto out;
	} else if (err)
		goto fix_extent_len;

out:
	ext4_ext_show_leaf(inode, path);
	return err;

fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	ext4_ext_dirty(handle, inode, path + path->p_depth);
	return err;
}
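/*
 * Illustrative note (not from the original source): splitting an extent
 * that maps logical 100..107 to physical 500..507 at split == 103 leaves
 * 'ex' as (ee_block 100, len 3, pblk 500) and inserts newex as
 * (ee_block 103, len 5, pblk 503), since newblock is computed as
 * split - ee_block + ext4_ext_pblock(ex).
 */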
/*
 * ext4_split_extent() splits an extent and marks the extent covered by @map
 * as split_flag indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three)
 * There are three possibilities:
 *   a> There is no split required
 *   b> Splits in two extents: Split is happening at either end of the extent
 *   c> Splits in three extents: Someone is splitting in the middle of the
 *      extent
 *
 */
static int ext4_split_extent(handle_t *handle,
			      struct inode *inode,
			      struct ext4_ext_path **ppath,
			      struct ext4_map_blocks *map,
			      int split_flag,
			      int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int unwritten;
	int split_flag1, flags1;
	int allocated = map->m_len;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	unwritten = ext4_ext_is_unwritten(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (unwritten)
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
				       EXT4_EXT_MARK_UNWRIT2;
		if (split_flag & EXT4_EXT_DATA_VALID2)
			split_flag1 |= EXT4_EXT_DATA_VALID1;
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	} else {
		allocated = ee_len - (map->m_lblk - ee_block);
	}
	/*
	 * Update path is required because previous ext4_split_extent_at() may
	 * result in split of original leaf or extent zeroout.
	 */
	path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (!ex) {
		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
				 (unsigned long) map->m_lblk);
		return -EFSCORRUPTED;
	}
	unwritten = ext4_ext_is_unwritten(ex);
	split_flag1 = 0;

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
		if (unwritten) {
			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
						     EXT4_EXT_MARK_UNWRIT2);
		}
		err = ext4_split_extent_at(handle, inode, ppath,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : allocated;
}
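/*
 * Illustrative note (not from the original source): for an extent covering
 * logical 100..115 and a map of [104, 4), the function above first splits
 * at map->m_lblk + map->m_len == 108 (yielding 100..107 and 108..115) and
 * then at map->m_lblk == 104, leaving the three extents 100..103,
 * 104..107 and 108..115, with the middle one exactly covering @map.
 */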
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an unwritten extent. It may result in splitting the unwritten
 * extent into multiple extents (up to three - one initialized and two
 * unwritten).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is unwritten.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path **ppath,
					   int flags)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_sb_info *sbi;
	struct ext4_extent_header *eh;
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex1, zero_ex2;
	struct ext4_extent *ex, *abut_ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int ee_len, depth, map_len = map->m_len;
	int allocated = 0, max_zeroout = 0;
	int err = 0;
	int split_flag = EXT4_EXT_DATA_VALID2;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map_len);

	sbi = EXT4_SB(inode->i_sb);
	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map_len)
		eof_block = map->m_lblk + map_len;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	zero_ex1.ee_len = 0;
	zero_ex2.ee_len = 0;

	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);

	/* Pre-conditions */
	BUG_ON(!ext4_ext_is_unwritten(ex));
	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));

	/*
	 * Attempt to transfer newly initialized blocks from the currently
	 * unwritten extent to its neighbor. This is much cheaper
	 * than an insertion followed by a merge as those involve costly
	 * memmove() calls. Transferring to the left is the common case in
	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
	 * followed by append writes.
	 *
	 * Limitations of the current logic:
	 *  - L1: we do not deal with writes covering the whole extent.
	 *    This would require removing the extent if the transfer
	 *    is possible.
	 *  - L2: we only attempt to merge with an extent stored in the
	 *    same extent tree node.
	 */
	if ((map->m_lblk == ee_block) &&
		/* See if we can merge left */
		(map_len < ee_len) &&		/*L1*/
		(ex > EXT_FIRST_EXTENT(eh))) {	/*L2*/
		ext4_lblk_t prev_lblk;
		ext4_fsblk_t prev_pblk, ee_pblk;
		unsigned int prev_len;

		abut_ex = ex - 1;
		prev_lblk = le32_to_cpu(abut_ex->ee_block);
		prev_len = ext4_ext_get_actual_len(abut_ex);
		prev_pblk = ext4_ext_pblock(abut_ex);
		ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon those conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, abut_ex);

			/* Shift the start of ex by 'map_len' blocks */
			ex->ee_block = cpu_to_le32(ee_block + map_len);
			ext4_ext_store_pblock(ex, ee_pblk + map_len);
			ex->ee_len = cpu_to_le16(ee_len - map_len);
			ext4_ext_mark_unwritten(ex); /* Restore the flag */

			/* Extend abut_ex by 'map_len' blocks */
			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);

			/* Result: number of initialized blocks past m_lblk */
			allocated = map_len;
		}
	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
		   (map_len < ee_len) &&	/*L1*/
		   ex < EXT_LAST_EXTENT(eh)) {	/*L2*/
		/* See if we can merge right */
		ext4_lblk_t next_lblk;
		ext4_fsblk_t next_pblk, ee_pblk;
		unsigned int next_len;

		abut_ex = ex + 1;
		next_lblk = le32_to_cpu(abut_ex->ee_block);
		next_len = ext4_ext_get_actual_len(abut_ex);
		next_pblk = ext4_ext_pblock(abut_ex);
		ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon those conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, abut_ex);

			/* Shift the start of abut_ex by 'map_len' blocks */
			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
			ex->ee_len = cpu_to_le16(ee_len - map_len);
			ext4_ext_mark_unwritten(ex); /* Restore the flag */

			/* Extend abut_ex by 'map_len' blocks */
			abut_ex->ee_len = cpu_to_le16(next_len + map_len);

			/* Result: number of initialized blocks past m_lblk */
			allocated = map_len;
		}
	}
	if (allocated) {
		/* Mark the block containing both extents as dirty */
		ext4_ext_dirty(handle, inode, path + depth);

		/* Update path to point to the right extent */
		path[depth].p_ext = abut_ex;
		goto out;
	} else
		allocated = ee_len - (map->m_lblk - ee_block);

	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
		max_zeroout = sbi->s_extent_max_zeroout_kb >>
			(inode->i_sb->s_blocksize_bits - 10);
	if (ext4_encrypted_inode(inode))
		max_zeroout = 0;
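	/*
	 * Illustrative note (not from the original source): with the default
	 * s_extent_max_zeroout_kb of 32 and a 4KiB block size
	 * (s_blocksize_bits == 12), max_zeroout works out to 32 >> 2 == 8
	 * blocks, i.e. unwritten regions of up to 8 blocks may be zeroed
	 * out instead of being split into separate extents.
	 */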
	/*
	 * five cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the head of the first
	 *    extent.
	 * 3. split the extent into two extents, zeroout the tail of the second
	 *    extent.
	 * 4. split the extent into two extents without zeroout.
	 * 5. no splitting needed, just possibly zeroout the head and/or the
	 *    tail of the extent.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (max_zeroout && (allocated > split_map.m_len)) {
		if (allocated <= max_zeroout) {
			/* case 3 or 5 */
			zero_ex1.ee_block =
				 cpu_to_le32(split_map.m_lblk +
					     split_map.m_len);
			zero_ex1.ee_len =
				cpu_to_le16(allocated - split_map.m_len);
			ext4_ext_store_pblock(&zero_ex1,
				ext4_ext_pblock(ex) + split_map.m_lblk +
				split_map.m_len - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex1);
			if (err)
				goto out;
			split_map.m_len = allocated;
		}
		if (split_map.m_lblk - ee_block + split_map.m_len <
								max_zeroout) {
			/* case 2 or 5 */
			if (split_map.m_lblk != ee_block) {
				zero_ex2.ee_block = ex->ee_block;
				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
							      ee_block);
				ext4_ext_store_pblock(&zero_ex2,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex2);
				if (err)
					goto out;
			}

			split_map.m_len += split_map.m_lblk - ee_block;
			split_map.m_lblk = ee_block;
			allocated = map->m_len;
		}
	}

	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
				flags);
	if (err > 0)
		err = 0;
out:
	/* If we have gotten a failure, don't zero out status tree */
	if (!err) {
		err = ext4_zeroout_es(inode, &zero_ex1);
		if (!err)
			err = ext4_zeroout_es(inode, &zero_ex2);
	}
	return err ? err : allocated;
}
/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO is used to write
 * to an unwritten extent.
 *
 * Writing to an unwritten extent may result in splitting the unwritten
 * extent into multiple initialized/unwritten extents (up to three)
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be unwritten
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 *
 * This works the same way in the case of initialized -> unwritten conversion.
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the unwritten extent is split. To prevent ENOSPC from occurring at IO
 * completion, we split the unwritten extent before the DIO is submitted.
 * The unwritten extent involved at this time will be split into three
 * unwritten extents (at most). After IO completes, the part being filled
 * will be converted to initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of the unwritten extent to be written on success.
 */
static int ext4_split_convert_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path **ppath,
					int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;

	ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
		  __func__, inode->i_ino,
		  (unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	/* Convert to unwritten */
	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
		split_flag |= EXT4_EXT_DATA_VALID1;
	/* Convert to initialized */
	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
		split_flag |= ee_block + ee_len <= eof_block ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
	}
	flags |= EXT4_GET_BLOCKS_PRE_IO;
	return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
						struct inode *inode,
						struct ext4_map_blocks *map,
						struct ext4_ext_path **ppath)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	unsigned int ee_len;
	int depth;
	int err = 0;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
		"block %llu, max_blocks %u\n", inode->i_ino,
		  (unsigned long long)ee_block, ee_len);

	/* If the extent is larger than requested, it is a clear sign that we
	 * still have some extent state machine issues left. So extent_split
	 * is still required.
	 * TODO: Once all related issues are fixed, this situation should be
	 * illegal.
	 */
	if (ee_block != map->m_lblk || ee_len > map->m_len) {
#ifdef EXT4_DEBUG
		ext4_warning("Inode (%ld) finished: extent logical block %llu,"
			     " len %u; IO logical block %llu, len %u",
			     inode->i_ino, (unsigned long long)ee_block, ee_len,
			     (unsigned long long)map->m_lblk, map->m_len);
#endif
		err = ext4_split_convert_extents(handle, inode, map, ppath,
						 EXT4_GET_BLOCKS_CONVERT);
		if (err < 0)
			return err;
		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = ext_depth(inode);
		ex = path[depth].p_ext;
	}

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(handle, inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}
/*
 * Handle EOFBLOCKS_FL flag, clearing it if necessary
 */
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
			      ext4_lblk_t lblk,
			      struct ext4_ext_path *path,
			      unsigned int len)
{
	int i, depth;
	struct ext4_extent_header *eh;
	struct ext4_extent *last_ex;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
		return 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

	/*
	 * We're going to remove EOFBLOCKS_FL entirely in future so we
	 * do not care for this case anymore. Simply remove the flag
	 * if there are no extents.
	 */
	if (unlikely(!eh->eh_entries))
		goto out;

	last_ex = EXT_LAST_EXTENT(eh);
	/*
	 * We should clear the EOFBLOCKS_FL flag if we are writing the
	 * last block in the last extent in the file. We test this by
	 * first checking to see if the caller to
	 * ext4_ext_get_blocks() was interested in the last block (or
	 * a block beyond the last block) in the current extent. If
	 * this turns out to be false, we can bail out from this
	 * function immediately.
	 */
	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
	    ext4_ext_get_actual_len(last_ex))
		return 0;
	/*
	 * If the caller does appear to be planning to write at or
	 * beyond the end of the current extent, we then test to see
	 * if the current extent is the last extent in the file, by
	 * checking to make sure it was reached via the rightmost node
	 * at each level of the tree.
	 */
	for (i = depth-1; i >= 0; i--)
		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
			return 0;
out:
	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	return ext4_mark_inode_dirty(handle, inode);
}
/**
 * ext4_find_delalloc_range: find delayed allocated block in the given range.
 *
 * Return 1 if there is a delalloc block in the range, otherwise 0.
 */
int ext4_find_delalloc_range(struct inode *inode,
			     ext4_lblk_t lblk_start,
			     ext4_lblk_t lblk_end)
{
	struct extent_status es;

	ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
	if (es.es_len == 0)
		return 0; /* there is no delayed extent in this tree */
	else if (es.es_lblk <= lblk_start &&
		 lblk_start < es.es_lblk + es.es_len)
		return 1;
	else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
		return 1;
	else
		return 0;
}
int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;
	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
}
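/*
 * Illustrative note (not from the original source): with
 * s_cluster_ratio == 16, a query for lblk 35 checks the whole cluster
 * containing it: lblk_start = 35 & ~15 == 32 and lblk_end == 47, so any
 * delayed block in logical range 32..47 makes the cluster count as
 * delalloc.
 */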
/**
 * Determines how many complete clusters (out of those specified by the 'map')
 * are under delalloc and were reserved quota for.
 * This function is called when we are writing out the blocks that were
 * originally written with their allocation delayed, but then the space was
 * allocated using fallocate() before the delayed allocation could be resolved.
 * The cases to look for are:
 * ('=' indicates delayed allocated blocks
 *  '-' indicates non-delayed allocated blocks)
 * (a) partial clusters towards beginning and/or end outside of allocated range
 *     are not delalloc'ed.
 *	Ex:
 *	|----c---=|====c====|====c====|===-c----|
 *	         |++++++ allocated ++++++|
 *	==> 4 complete clusters in above example
 *
 * (b) partial cluster (outside of allocated range) towards either end is
 *     marked for delayed allocation. In this case, we will exclude that
 *     cluster.
 *	Ex:
 *	|----====c========|========c========|
 *	     |++++++ allocated ++++++|
 *	==> 1 complete cluster in above example
 *
 *	Ex:
 *	|================c================|
 *	            |++++++ allocated ++++++|
 *	==> 0 complete clusters in above example
 *
 * ext4_da_update_reserve_space() will be called only if we
 * determine here that there were some "entire" clusters that span
 * this 'allocated' range.
 * In the non-bigalloc case, this function will just end up returning num_blks
 * without ever calling ext4_find_delalloc_range.
 */
static unsigned int
get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
			   unsigned int num_blks)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
	ext4_lblk_t lblk_from, lblk_to, c_offset;
	unsigned int allocated_clusters = 0;

	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);

	/* max possible clusters for this allocation */
	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;

	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);

	/* Check towards left side */
	c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
	if (c_offset) {
		lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
		lblk_to = lblk_from + c_offset - 1;

		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
			allocated_clusters--;
	}

	/* Now check towards right. */
	c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
	if (allocated_clusters && c_offset) {
		lblk_from = lblk_start + num_blks;
		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;

		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
			allocated_clusters--;
	}

	return allocated_clusters;
}
static int
convert_initialized_extent(handle_t *handle, struct inode *inode,
			   struct ext4_map_blocks *map,
			   struct ext4_ext_path **ppath,
			   unsigned int allocated)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	unsigned int ee_len;
	int depth;
	int err = 0;

	/*
	 * Make sure that the extent is no bigger than we support with
	 * unwritten extents
	 */
	if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
		map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	ext_debug("%s: inode %lu, logical "
		"block %llu, max_blocks %u\n", __func__, inode->i_ino,
		(unsigned long long)ee_block, ee_len);

	if (ee_block != map->m_lblk || ee_len > map->m_len) {
		err = ext4_split_convert_extents(handle, inode, map, ppath,
				EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
		if (err < 0)
			return err;
		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = ext_depth(inode);
		ex = path[depth].p_ext;
		if (!ex) {
			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
					 (unsigned long) map->m_lblk);
			return -EFSCORRUPTED;
		}
	}

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		return err;
	/* first mark the extent as unwritten */
	ext4_ext_mark_unwritten(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(handle, inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
	if (err)
		return err;
	ext4_ext_show_leaf(inode, path);

	ext4_update_inode_fsync_trans(handle, inode, 1);
	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
	if (err)
		return err;
	map->m_flags |= EXT4_MAP_UNWRITTEN;
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_len = allocated;
	return allocated;
}
static int
ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			struct ext4_ext_path **ppath, int flags,
			unsigned int allocated, ext4_fsblk_t newblock)
{
	struct ext4_ext_path *path = *ppath;
	int ret = 0;
	int err = 0;

	ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	/*
	 * When writing into unwritten space, we should not fail to
	 * allocate metadata blocks for the new extent block if needed.
	 */
	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;

	trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
						allocated, newblock);

	/* get_block() called before submitting the IO: split the extent */
	if (flags & EXT4_GET_BLOCKS_PRE_IO) {
		ret = ext4_split_convert_extents(handle, inode, map, ppath,
					 flags | EXT4_GET_BLOCKS_CONVERT);
		if (ret <= 0)
			goto out;
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out;
	}
	/* IO end_io complete, convert the filled extent to written */
	if (flags & EXT4_GET_BLOCKS_CONVERT) {
		if (flags & EXT4_GET_BLOCKS_ZERO) {
			if (allocated > map->m_len)
				allocated = map->m_len;
			err = ext4_issue_zeroout(inode, map->m_lblk, newblock,
						 allocated);
			if (err < 0)
				goto out2;
		}
		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
							   ppath);
		if (ret >= 0) {
			ext4_update_inode_fsync_trans(handle, inode, 1);
			err = check_eofblocks_fl(handle, inode, map->m_lblk,
						 path, map->m_len);
		} else
			err = ret;
		map->m_flags |= EXT4_MAP_MAPPED;
		map->m_pblk = newblock;
		if (allocated > map->m_len)
			allocated = map->m_len;
		map->m_len = allocated;
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto map_out;
	}

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already. We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us. But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out1;
	}

	/* buffered write, writepage time, convert */
	ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
	map->m_flags |= EXT4_MAP_NEW;
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra block
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
	if (allocated > map->m_len) {
		clean_bdev_aliases(inode->i_sb->s_bdev, newblock + map->m_len,
				   allocated - map->m_len);
		allocated = map->m_len;
	}
	map->m_len = allocated;

	/*
	 * If we have done fallocate with the offset that is already
	 * delayed allocated, we would have block reservation
	 * and quota reservation done in the delayed write path.
	 * But fallocate would have already updated quota and block
	 * count for this offset. So cancel these reservations.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		unsigned int reserved_clusters;
		reserved_clusters = get_reserved_cluster_alloc(inode,
				map->m_lblk, map->m_len);
		if (reserved_clusters)
			ext4_da_update_reserve_space(inode,
						     reserved_clusters,
						     0);
	}

map_out:
	map->m_flags |= EXT4_MAP_MAPPED;
	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
					 map->m_len);
		if (err < 0)
			goto out2;
	}
out1:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	return err ? err : allocated;
}
/*
 * get_implied_cluster_alloc - check to see if the requested
 * allocation (in the map structure) overlaps with a cluster already
 * allocated in an extent.
 *	@sb	The filesystem superblock structure
 *	@map	The requested lblk->pblk mapping
 *	@ex	The extent structure which might contain an implied
 *		cluster allocation
 *
 * This function is called by ext4_ext_map_blocks() after we failed to
 * find blocks that were already in the inode's extent tree. Hence,
 * we know that the beginning of the requested region cannot overlap
 * the extent from the inode's extent tree. There are three cases we
 * want to catch. The first is this case:
 *
 *		  |--- cluster # N--|
 *    |--- extent ---|	|---- requested region ---|
 *			|==========|
 *
 * The second case that we need to test for is this one:
 *
 *   |--------- cluster # N ----------------|
 *	   |--- requested region --|   |------- extent ----|
 *	   |=======================|
 *
 * The third case is when the requested region lies between two extents
 * within the same cluster:
 *          |------------- cluster # N-------------|
 * |----- ex -----|                  |---- ex_right ----|
 *                  |------ requested region ------|
 *                  |================|
 *
 * In each of the above cases, we need to set the map->m_pblk and
 * map->m_len so they correspond to the extent labelled "|====|" from
 * cluster #N, since it is already in use for data in cluster
 * EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
 * as a new "allocated" block region. Otherwise, we will return 0 and
 * ext4_ext_map_blocks() will then allocate one or more new clusters
 * by calling ext4_mb_new_blocks().
 */
static int get_implied_cluster_alloc(struct super_block *sb,
				     struct ext4_map_blocks *map,
				     struct ext4_extent *ex,
				     struct ext4_ext_path *path)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
	ext4_lblk_t ex_cluster_start, ex_cluster_end;
	ext4_lblk_t rr_cluster_start;
	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);

	/* The extent passed in that we are trying to match */
	ex_cluster_start = EXT4_B2C(sbi, ee_block);
	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);

	/* The requested region passed into ext4_map_blocks() */
	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);

	if ((rr_cluster_start == ex_cluster_end) ||
	    (rr_cluster_start == ex_cluster_start)) {
		if (rr_cluster_start == ex_cluster_end)
			ee_start += ee_len - 1;
		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
		map->m_len = min(map->m_len,
				 (unsigned) sbi->s_cluster_ratio - c_offset);
		/*
		 * Check for and handle this case:
		 *
		 *   |--------- cluster # N-------------|
		 *		       |------- extent ----|
		 *	   |--- requested region ---|
		 *	   |===========|
		 */
		if (map->m_lblk < ee_block)
			map->m_len = min(map->m_len, ee_block - map->m_lblk);

		/*
		 * Check for the case where there is already another allocated
		 * block to the right of 'ex' but before the end of the cluster.
		 *
		 *          |------------- cluster # N-------------|
		 * |----- ex -----|                  |---- ex_right ----|
		 *                  |------ requested region ------|
		 *                  |================|
		 */
		if (map->m_lblk > ee_block) {
			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
			map->m_len = min(map->m_len, next - map->m_lblk);
		}

		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
		return 1;
	}

	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
	return 0;
}
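/*
 * Illustrative note (not from the original source): with
 * s_cluster_ratio == 4, an extent mapping logical 8..9 to physical
 * 1000..1001, and a request at map->m_lblk == 10, we get c_offset == 2,
 * ex_cluster_end == EXT4_B2C(sbi, 9) == 2 and rr_cluster_start ==
 * EXT4_B2C(sbi, 10) == 2, so the cluster is implied: map->m_pblk becomes
 * EXT4_PBLK_CMASK(sbi, 1001) + 2 == 1002 and map->m_len is capped at
 * 4 - 2 == 2 blocks (and further by the next allocated block, if any).
 */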
/*
 * Block allocation/map/preallocation routine for extents based files
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map, int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex, *ex2;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_fsblk_t newblock = 0;
	int free_on_err = 0, err = 0, depth, ret;
	unsigned int allocated = 0, offset = 0;
	unsigned int allocated_clusters = 0;
	struct ext4_allocation_request ar;
	ext4_lblk_t cluster_offset;
	bool map_from_cluster = false;

	ext_debug("blocks %u/%u requested for inode %lu\n",
		  map->m_lblk, map->m_len, inode->i_ino);
	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);

	/* find extent for this block */
	path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_find_extent()
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
				 path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out2;
	}

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * unwritten extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);

		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);

		/* if found extent covers block, simply return it */
		if (in_range(map->m_lblk, ee_block, ee_len)) {
			newblock = map->m_lblk - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (map->m_lblk - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
				  ee_block, ee_len, newblock);

			/*
			 * If the extent is initialized check whether the
			 * caller wants to convert it to unwritten.
			 */
			if ((!ext4_ext_is_unwritten(ex)) &&
			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
				allocated = convert_initialized_extent(
						handle, inode, map, &path,
						allocated);
				goto out2;
			} else if (!ext4_ext_is_unwritten(ex))
				goto out;

			ret = ext4_ext_handle_unwritten_extents(
				handle, inode, map, &path, flags,
				allocated, newblock);
			if (ret < 0)
				err = ret;
			else
				allocated = ret;
			goto out2;
		}
	}
	/*
	 * requested block isn't allocated yet;
	 * we can't try to create blocks if the create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		ext4_lblk_t hole_start, hole_len;

		hole_start = map->m_lblk;
		hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);

		/* Update hole_len to reflect hole size after map->m_lblk */
		if (hole_start != map->m_lblk)
			hole_len -= map->m_lblk - hole_start;
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, hole_len);

		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.
	 */
	newex.ee_block = cpu_to_le32(map->m_lblk);
	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);

	/*
	 * If we are doing bigalloc, check to see if the extent returned
	 * by ext4_find_extent() implies a cluster we can use.
	 */
	if (cluster_offset && ex &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
		ar.len = allocated = map->m_len;
		newblock = map->m_pblk;
		map_from_cluster = true;
		goto got_allocated_blocks;
	}

	/* find neighbour allocated blocks */
	ar.lleft = map->m_lblk;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = map->m_lblk;
	ex2 = NULL;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
	if (err)
		goto out2;

	/* Check if the extent after searching to the right implies a
	 * cluster we can use. */
	if ((sbi->s_cluster_ratio > 1) && ex2 &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
		ar.len = allocated = map->m_len;
		newblock = map->m_pblk;
		map_from_cluster = true;
		goto got_allocated_blocks;
	}

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
	 * EXT_UNWRITTEN_MAX_LEN.
	 */
	if (map->m_len > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
		map->m_len = EXT_INIT_MAX_LEN;
	else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
		map->m_len = EXT_UNWRITTEN_MAX_LEN;

	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
	newex.ee_len = cpu_to_le16(map->m_len);
	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = map->m_len;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ar.logical = map->m_lblk;
	/*
	 * We calculate the offset from the beginning of the cluster
	 * for the logical block number, since when we allocate a
	 * physical cluster, the physical block should start at the
	 * same offset from the beginning of the cluster. This is
	 * needed so that future calls to get_implied_cluster_alloc()
	 * work correctly.
	 */
	offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
	ar.goal -= offset;
	ar.logical -= offset;
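	/*
	 * Illustrative note (not from the original source): with
	 * s_cluster_ratio == 8, map->m_lblk == 17 and allocated == 3,
	 * offset is 17 & 7 == 1, so the request is widened to
	 * ar.len = EXT4_NUM_B2C(sbi, 1 + 3) == 1 cluster, and the goal and
	 * logical block are pulled back by one block to the cluster start.
	 */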
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);
	free_on_err = 1;
	allocated_clusters = ar.len;
	ar.len = EXT4_C2B(sbi, ar.len) - offset;
	if (ar.len > allocated)
		ar.len = allocated;

got_allocated_blocks:
	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock + offset);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark unwritten */
	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
		ext4_ext_mark_unwritten(&newex);
		map->m_flags |= EXT4_MAP_UNWRITTEN;
	}

	err = 0;
	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
		err = check_eofblocks_fl(handle, inode, map->m_lblk,
					 path, ar.len);
	if (!err)
		err = ext4_ext_insert_extent(handle, inode, &path,
					     &newex, flags);

	if (err && free_on_err) {
		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, NULL, newblock,
				 EXT4_C2B(sbi, allocated_clusters), fb_flags);
		goto out2;
	}
  4029. /* previous routine could use block we allocated */
  4030. newblock = ext4_ext_pblock(&newex);
  4031. allocated = ext4_ext_get_actual_len(&newex);
  4032. if (allocated > map->m_len)
  4033. allocated = map->m_len;
  4034. map->m_flags |= EXT4_MAP_NEW;
  4035. /*
  4036. * Update reserved blocks/metadata blocks after successful
  4037. * block allocation which had been deferred till now.
  4038. */
  4039. if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
  4040. unsigned int reserved_clusters;
  4041. /*
  4042. * Check how many clusters we had reserved this allocated range
  4043. */
  4044. reserved_clusters = get_reserved_cluster_alloc(inode,
  4045. map->m_lblk, allocated);
  4046. if (!map_from_cluster) {
  4047. BUG_ON(allocated_clusters < reserved_clusters);
  4048. if (reserved_clusters < allocated_clusters) {
  4049. struct ext4_inode_info *ei = EXT4_I(inode);
  4050. int reservation = allocated_clusters -
  4051. reserved_clusters;
  4052. /*
  4053. * It seems we claimed few clusters outside of
  4054. * the range of this allocation. We should give
  4055. * it back to the reservation pool. This can
  4056. * happen in the following case:
  4057. *
  4058. * * Suppose s_cluster_ratio is 4 (i.e., each
  4059. * cluster has 4 blocks. Thus, the clusters
  4060. * are [0-3],[4-7],[8-11]...
  4061. * * First comes delayed allocation write for
  4062. * logical blocks 10 & 11. Since there were no
  4063. * previous delayed allocated blocks in the
  4064. * range [8-11], we would reserve 1 cluster
  4065. * for this write.
  4066. * * Next comes write for logical blocks 3 to 8.
  4067. * In this case, we will reserve 2 clusters
  4068. * (for [0-3] and [4-7]; and not for [8-11] as
  4069. * that range has a delayed allocated blocks.
  4070. * Thus total reserved clusters now becomes 3.
  4071. * * Now, during the delayed allocation writeout
  4072. * time, we will first write blocks [3-8] and
  4073. * allocate 3 clusters for writing these
  4074. * blocks. Also, we would claim all these
  4075. * three clusters above.
  4076. * * Now when we come here to writeout the
  4077. * blocks [10-11], we would expect to claim
  4078. * the reservation of 1 cluster we had made
  4079. * (and we would claim it since there are no
  4080. * more delayed allocated blocks in the range
  4081. * [8-11]. But our reserved cluster count had
  4082. * already gone to 0.
  4083. *
  4084. * Thus, at the step 4 above when we determine
  4085. * that there are still some unwritten delayed
  4086. * allocated blocks outside of our current
  4087. * block range, we should increment the
  4088. * reserved clusters count so that when the
  4089. * remaining blocks finally gets written, we
  4090. * could claim them.
  4091. */
  4092. dquot_reserve_block(inode,
  4093. EXT4_C2B(sbi, reservation));
  4094. spin_lock(&ei->i_block_reservation_lock);
  4095. ei->i_reserved_data_blocks += reservation;
  4096. spin_unlock(&ei->i_block_reservation_lock);
  4097. }
  4098. /*
  4099. * We will claim quota for all newly allocated blocks.
  4100. * We're updating the reserved space *after* the
  4101. * correction above so we do not accidentally free
  4102. * all the metadata reservation because we might
  4103. * actually need it later on.
  4104. */
  4105. ext4_da_update_reserve_space(inode, allocated_clusters,
  4106. 1);
  4107. }
  4108. }
  4109. /*
  4110. * Cache the extent and update transaction to commit on fdatasync only
  4111. * when it is _not_ an unwritten extent.
  4112. */
  4113. if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
  4114. ext4_update_inode_fsync_trans(handle, inode, 1);
  4115. else
  4116. ext4_update_inode_fsync_trans(handle, inode, 0);
  4117. out:
  4118. if (allocated > map->m_len)
  4119. allocated = map->m_len;
  4120. ext4_ext_show_leaf(inode, path);
  4121. map->m_flags |= EXT4_MAP_MAPPED;
  4122. map->m_pblk = newblock;
  4123. map->m_len = allocated;
  4124. out2:
  4125. ext4_ext_drop_refs(path);
  4126. kfree(path);
  4127. trace_ext4_ext_map_blocks_exit(inode, flags, map,
  4128. err ? err : allocated);
  4129. return err ? err : allocated;
  4130. }

int ext4_ext_truncate(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	int err = 0;

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err)
		return err;

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
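	/*
	 * For illustration, with a 4k block size: i_size == 6000 gives
	 * last_block == (6000 + 4095) >> 12 == 2, so blocks 0-1 stay and
	 * everything from logical block 2 onwards is removed below.
	 */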
retry:
	err = ext4_es_remove_extent(inode, last_block,
				    EXT_MAX_BLOCKS - last_block);
	if (err == -ENOMEM) {
		cond_resched();
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto retry;
	}
	if (err)
		return err;
	return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
}

static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
				  ext4_lblk_t len, loff_t new_size,
				  int flags)
{
	struct inode *inode = file_inode(file);
	handle_t *handle;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	int depth = 0;
	struct ext4_map_blocks map;
	unsigned int credits;
	loff_t epos;

	BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
	map.m_lblk = offset;
	map.m_len = len;
	/*
	 * Don't normalize the request if it can fit in one extent so
	 * that it doesn't get unnecessarily split into multiple
	 * extents.
	 */
	if (len <= EXT_UNWRITTEN_MAX_LEN)
		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
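
	/*
	 * Illustrative note: EXT4_GET_BLOCKS_NO_NORMALIZE maps to the
	 * EXT4_MB_HINT_NOPREALLOC hint in ext4_ext_map_blocks() above, so
	 * a request of at most EXT_UNWRITTEN_MAX_LEN (32767) blocks is
	 * passed to mballoc as-is instead of being rounded up and then
	 * carved into several extents.
	 */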

	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, len);
	depth = ext_depth(inode);

retry:
	while (ret >= 0 && len) {
		/*
		 * Recalculate credits when extent tree depth changes.
		 */
		if (depth != ext_depth(inode)) {
			credits = ext4_chunk_trans_blocks(inode, len);
			depth = ext_depth(inode);
		}

		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map, flags);
		if (ret <= 0) {
			ext4_debug("inode #%lu: block %u: len %u: "
				   "ext4_ext_map_blocks returned %d",
				   inode->i_ino, map.m_lblk,
				   map.m_len, ret);
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		map.m_lblk += ret;
		map.m_len = len = len - ret;
		epos = (loff_t)map.m_lblk << inode->i_blkbits;
		inode->i_ctime = current_time(inode);
		if (new_size) {
			if (epos > new_size)
				epos = new_size;
			if (ext4_update_inode_size(inode, epos) & 0x1)
				inode->i_mtime = inode->i_ctime;
		} else {
			if (epos > inode->i_size)
				ext4_set_inode_flag(inode,
						    EXT4_INODE_EOFBLOCKS);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
	    ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}

	return ret > 0 ? ret2 : ret;
}

static long ext4_zero_range(struct file *file, loff_t offset,
			    loff_t len, int mode)
{
	struct inode *inode = file_inode(file);
	handle_t *handle = NULL;
	unsigned int max_blocks;
	loff_t new_size = 0;
	int ret = 0;
	int flags;
	int credits;
	int partial_begin, partial_end;
	loff_t start, end;
	ext4_lblk_t lblk;
	unsigned int blkbits = inode->i_blkbits;

	trace_ext4_zero_range(inode, offset, len, mode);

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	/* Call ext4_force_commit to flush all data in case of data=journal. */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	/*
	 * Round up offset. This is not fallocate, we need to zero out
	 * blocks, so convert interior block aligned part of the range to
	 * unwritten and possibly manually zero out unaligned parts of the
	 * range.
	 */
	start = round_up(offset, 1 << blkbits);
	end = round_down((offset + len), 1 << blkbits);

	if (start < offset || end > offset + len)
		return -EINVAL;
	partial_begin = offset & ((1 << blkbits) - 1);
	partial_end = (offset + len) & ((1 << blkbits) - 1);

	lblk = start >> blkbits;
	max_blocks = (end >> blkbits);
	if (max_blocks < lblk)
		max_blocks = 0;
	else
		max_blocks -= lblk;
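
	/*
	 * Worked example, assuming a 4k block size (blkbits == 12):
	 * offset == 1000 and len == 10000 give start == 4096,
	 * end == 8192, partial_begin == 1000, partial_end == 2808,
	 * lblk == 1 and max_blocks == 1; block 1 is converted to
	 * unwritten while the two partial edges are zeroed by hand.
	 */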

	inode_lock(inode);

	/*
	 * Indirect files do not support unwritten extents
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto out_mutex;
	}

	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
	if (mode & FALLOC_FL_KEEP_SIZE)
		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/* Preallocate the range including the unaligned edges */
	if (partial_begin || partial_end) {
		ret = ext4_alloc_file_blocks(file,
				round_down(offset, 1 << blkbits) >> blkbits,
				(round_up((offset + len), 1 << blkbits) -
				 round_down(offset, 1 << blkbits)) >> blkbits,
				new_size, flags);
		if (ret)
			goto out_dio;
	}

	/* Zero range excluding the unaligned edges */
	if (max_blocks > 0) {
		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
			  EXT4_EX_NOCACHE);

		/*
		 * Prevent page faults from reinstantiating pages we have
		 * released from page cache.
		 */
		down_write(&EXT4_I(inode)->i_mmap_sem);
		ret = ext4_update_disksize_before_punch(inode, offset, len);
		if (ret) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			goto out_dio;
		}
		/* Now release the pages and zero block aligned part of pages */
		truncate_pagecache_range(inode, start, end - 1);
		inode->i_mtime = inode->i_ctime = current_time(inode);

		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
					     flags);
		up_write(&EXT4_I(inode)->i_mmap_sem);
		if (ret)
			goto out_dio;
	}
	if (!partial_begin && !partial_end)
		goto out_dio;

	/*
	 * In worst case we have to write out two nonadjacent unwritten
	 * blocks and update the inode
	 */
	credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
	if (ext4_should_journal_data(inode))
		credits += 2;
	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(inode->i_sb, ret);
		goto out_dio;
	}

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (new_size) {
		ext4_update_inode_size(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if ((offset + len) > i_size_read(inode))
			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}
	ext4_mark_inode_dirty(handle, inode);

	/* Zero out partial block at the edges of the range */
	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

	if (file->f_flags & O_SYNC)
		ext4_handle_sync(handle);

	ext4_journal_stop(handle);
out_dio:
	ext4_inode_resume_unlocked_dio(inode);
out_mutex:
	inode_unlock(inode);
	return ret;
}

/*
 * Preallocate space for a file. This implements ext4's fallocate file
 * operation, which gets called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support the fallocate() system
 * call).
 */
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	loff_t new_size = 0;
	unsigned int max_blocks;
	int ret = 0;
	int flags;
	ext4_lblk_t lblk;
	unsigned int blkbits = inode->i_blkbits;

	/*
	 * Encrypted inodes can't handle collapse range or insert
	 * range since we would need to re-encrypt blocks with a
	 * different IV or XTS tweak (which are based on the logical
	 * block number).
	 *
	 * XXX It's not clear why zero range isn't working, but we'll
	 * leave it disabled for encrypted inodes for now.  This is a
	 * bug we should fix....
	 */
	if (ext4_encrypted_inode(inode) &&
	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
		     FALLOC_FL_ZERO_RANGE)))
		return -EOPNOTSUPP;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
		     FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return ext4_punch_hole(inode, offset, len);

	ret = ext4_convert_inline_data(inode);
	if (ret)
		return ret;

	if (mode & FALLOC_FL_COLLAPSE_RANGE)
		return ext4_collapse_range(inode, offset, len);

	if (mode & FALLOC_FL_INSERT_RANGE)
		return ext4_insert_range(inode, offset, len);

	if (mode & FALLOC_FL_ZERO_RANGE)
		return ext4_zero_range(file, offset, len, mode);

	trace_ext4_fallocate_enter(inode, offset, len, mode);
	lblk = offset >> blkbits;

	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
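	/*
	 * For illustration, with 4k blocks: offset == 1000 and
	 * len == 10000 cover bytes [1000, 11000), i.e. logical blocks
	 * 0-2, so lblk == 0 and max_blocks == 3.
	 */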
	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
	if (mode & FALLOC_FL_KEEP_SIZE)
		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;

	inode_lock(inode);

	/*
	 * We only support preallocation for extent-based files
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto out;
	}

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
	ext4_inode_resume_unlocked_dio(inode);
	if (ret)
		goto out;

	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
		ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
						EXT4_I(inode)->i_sync_tid);
	}
out:
	inode_unlock(inode);
	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
	return ret;
}

/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size.
 * All unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io callback
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
				   loff_t offset, ssize_t len)
{
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);

	/*
	 * This is somewhat ugly but the idea is clear: When transaction is
	 * reserved, everything goes into it. Otherwise we rather start several
	 * smaller transactions for conversion of each extent separately.
	 */
	if (handle) {
		handle = ext4_journal_start_reserved(handle,
						     EXT4_HT_EXT_CONVERT);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		credits = 0;
	} else {
		/*
		 * credits to insert 1 extent into extent tree
		 */
		credits = ext4_chunk_trans_blocks(inode, max_blocks);
	}
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		if (credits) {
			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
						    credits);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				break;
			}
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0)
			ext4_warning(inode->i_sb,
				     "inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
				     inode->i_ino, map.m_lblk,
				     map.m_len, ret);
		ext4_mark_inode_dirty(handle, inode);
		if (credits)
			ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	if (!credits)
		ret2 = ext4_journal_stop(handle);
	return ret > 0 ? ret2 : ret;
}

/*
 * If newes is not an existing extent (newes->es_pblk equals zero), find
 * the delayed extent at the start of newes, update newes accordingly and
 * return the start of the next delayed extent.
 *
 * If newes is an existing extent (newes->es_pblk is not equal to zero),
 * return the start of the next delayed extent or EXT_MAX_BLOCKS if no
 * delayed extent is found. Leave newes unmodified.
 */
static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes)
{
	struct extent_status es;
	ext4_lblk_t block, next_del;

	if (newes->es_pblk == 0) {
		ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
				newes->es_lblk + newes->es_len - 1, &es);

		/*
		 * No extent in the extent tree contains block
		 * @newes->es_pblk; the block may lie in 1) a hole or
		 * 2) a delayed extent.
		 */
		if (es.es_len == 0)
			/* A hole found. */
			return 0;

		if (es.es_lblk > newes->es_lblk) {
			/* A hole found. */
			newes->es_len = min(es.es_lblk - newes->es_lblk,
					    newes->es_len);
			return 0;
		}

		newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
	}

	block = newes->es_lblk + newes->es_len;
	ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
	if (es.es_len == 0)
		next_del = EXT_MAX_BLOCKS;
	else
		next_del = es.es_lblk;

	return next_del;
}

/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
			     struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = (__u64)iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
						start, len);

		if (has_inline)
			return error;
	}

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		error = ext4_ext_precache(inode);
		if (error)
			return error;
	}

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
					    ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
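
		/*
		 * For illustration, with 4k blocks: start == 5000 and
		 * len == 3000 touch bytes [5000, 8000), so start_blk == 1,
		 * last_blk == 1 and len_blks == 1.
		 */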

		/*
		 * Walk the extent tree gathering extent information
		 * and pushing extents back to the user.
		 */
		error = ext4_fill_fiemap_extents(inode, start_blk,
						 len_blks, fieinfo);
	}
	return error;
}

/*
 * ext4_access_path:
 * Function to access the path buffer for marking it dirty.
 * It also checks if there are sufficient credits left in the journal handle
 * to update the path.
 */
static int
ext4_access_path(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path)
{
	int credits, err;

	if (!ext4_handle_valid(handle))
		return 0;

	/*
	 * Check if need to extend journal credits
	 * 3 for leaf, sb, and inode plus 2 (bmap and group
	 * descriptor) for each block group; assume two block
	 * groups
	 */
	if (handle->h_buffer_credits < 7) {
		credits = ext4_writepage_trans_blocks(inode);
		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		/* EAGAIN is success */
		if (err && err != -EAGAIN)
			return err;
	}

	err = ext4_ext_get_access(handle, inode, path);
	return err;
}

/*
 * ext4_ext_shift_path_extents:
 * Shift the extents of a path structure lying between path[depth].p_ext
 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
 * whether it is a right shift or a left shift operation.
 */
static int
ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
			    struct inode *inode, handle_t *handle,
			    enum SHIFT_DIRECTION SHIFT)
{
	int depth, err = 0;
	struct ext4_extent *ex_start, *ex_last;
	bool update = false;

	depth = path->p_depth;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			ex_start = path[depth].p_ext;
			if (!ex_start)
				return -EFSCORRUPTED;

			ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);

			err = ext4_access_path(handle, inode, path + depth);
			if (err)
				goto out;

			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
				update = true;

			while (ex_start <= ex_last) {
				if (SHIFT == SHIFT_LEFT) {
					le32_add_cpu(&ex_start->ee_block,
						     -shift);
					/* Try to merge to the left. */
					if ((ex_start >
					     EXT_FIRST_EXTENT(path[depth].p_hdr))
					    &&
					    ext4_ext_try_to_merge_right(inode,
							path, ex_start - 1))
						ex_last--;
					else
						ex_start++;
				} else {
					le32_add_cpu(&ex_last->ee_block, shift);
					ext4_ext_try_to_merge_right(inode, path,
								    ex_last);
					ex_last--;
				}
			}
			err = ext4_ext_dirty(handle, inode, path + depth);
			if (err)
				goto out;

			if (--depth < 0 || !update)
				break;
		}

		/* Update index too */
		err = ext4_access_path(handle, inode, path + depth);
		if (err)
			goto out;

		if (SHIFT == SHIFT_LEFT)
			le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
		else
			le32_add_cpu(&path[depth].p_idx->ei_block, shift);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		/* we are done if current index is not a starting index */
		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
			break;

		depth--;
	}

out:
	return err;
}

/*
 * ext4_ext_shift_extents:
 * All the extents which lie in the range from @start to the last allocated
 * block for the @inode are shifted either towards left or right (depending
 * upon @SHIFT) by @shift blocks.
 * On success, 0 is returned, error otherwise.
 */
static int
ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
		       ext4_lblk_t start, ext4_lblk_t shift,
		       enum SHIFT_DIRECTION SHIFT)
{
	struct ext4_ext_path *path;
	int ret = 0, depth;
	struct ext4_extent *extent;
	ext4_lblk_t stop, *iterator, ex_start, ex_end;

	/* Let path point to the last extent */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);

	depth = path->p_depth;
	extent = path[depth].p_ext;
	if (!extent)
		goto out;

	stop = le32_to_cpu(extent->ee_block);

	/*
	 * In case of left shift, don't start shifting extents until we make
	 * sure the hole is big enough to accommodate the shift.
	 */
	if (SHIFT == SHIFT_LEFT) {
		path = ext4_find_extent(inode, start - 1, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (extent) {
			ex_start = le32_to_cpu(extent->ee_block);
			ex_end = le32_to_cpu(extent->ee_block) +
				ext4_ext_get_actual_len(extent);
		} else {
			ex_start = 0;
			ex_end = 0;
		}

		if ((start == ex_start && shift > ex_start) ||
		    (shift > start - ex_end)) {
			ext4_ext_drop_refs(path);
			kfree(path);
			return -EINVAL;
		}
	}
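
	/*
	 * Worked example for the check above: if the extent just before
	 * @start ends at block 100 (ex_end == 100) and start == 120, at
	 * most 20 blocks of shift fit in the hole; shift == 25 would slide
	 * the shifted extents over their left neighbour, so we bail out
	 * with -EINVAL.
	 */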

	/*
	 * In case of left shift, iterator points to start and it is increased
	 * till we reach stop. In case of right shift, iterator points to stop
	 * and it is decreased till we reach start.
	 */
	if (SHIFT == SHIFT_LEFT)
		iterator = &start;
	else
		iterator = &stop;

	/*
	 * It's safe to start updating extents. Start and stop are unsigned,
	 * so in case of right shift if the extent with block 0 is reached,
	 * iterator becomes NULL to indicate the end of the loop.
	 */
	while (iterator && start <= stop) {
		path = ext4_find_extent(inode, *iterator, &path,
					EXT4_EX_NOCACHE);
		if (IS_ERR(path))
			return PTR_ERR(path);
		depth = path->p_depth;
		extent = path[depth].p_ext;
		if (!extent) {
			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
					 (unsigned long) *iterator);
			return -EFSCORRUPTED;
		}
		if (SHIFT == SHIFT_LEFT && *iterator >
		    le32_to_cpu(extent->ee_block)) {
			/* Hole, move to the next extent */
			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
				path[depth].p_ext++;
			} else {
				*iterator = ext4_ext_next_allocated_block(path);
				continue;
			}
		}

		if (SHIFT == SHIFT_LEFT) {
			extent = EXT_LAST_EXTENT(path[depth].p_hdr);
			*iterator = le32_to_cpu(extent->ee_block) +
					ext4_ext_get_actual_len(extent);
		} else {
			extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
			if (le32_to_cpu(extent->ee_block) > 0)
				*iterator = le32_to_cpu(extent->ee_block) - 1;
			else
				/* Beginning is reached, end of the loop */
				iterator = NULL;
			/* Update path extent in case we need to stop */
			while (le32_to_cpu(extent->ee_block) < start)
				extent++;
			path[depth].p_ext = extent;
		}
		ret = ext4_ext_shift_path_extents(path, shift, inode,
						  handle, SHIFT);
		if (ret)
			break;
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}

/*
 * ext4_collapse_range:
 * This implements the fallocate's collapse range functionality for ext4.
 * Returns 0 on success, a negative error code otherwise.
 */
int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t punch_start, punch_stop;
	handle_t *handle;
	unsigned int credits;
	loff_t new_size, ioffset;
	int ret;

	/*
	 * We need to test this early because xfstests assumes that a
	 * collapse range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support collapse range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Collapse range works only on fs block size aligned offsets. */
	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
		return -EINVAL;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	trace_ext4_collapse_range(inode, offset, len);

	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
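
	/*
	 * For illustration, with 4k blocks: offset == 8192 and
	 * len == 4096 give punch_start == 2 and punch_stop == 3, i.e.
	 * block 2 is removed and everything from block 3 onwards is
	 * shifted left by one block.
	 */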

	/* Call ext4_force_commit to flush all data in case of data=journal. */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	inode_lock(inode);
	/*
	 * There is no need to overlap collapse range with EOF, in which case
	 * it is effectively a truncate operation
	 */
	if (offset + len >= i_size_read(inode)) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);
	/*
	 * Need to round down offset to be aligned with page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/*
	 * Write tail of the last page before removed range since it will get
	 * removed from the page cache below.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
	if (ret)
		goto out_mmap;
	/*
	 * Write data that will be shifted to preserve it when discarding
	 * page cache below. We are also protected from pages becoming dirty
	 * by i_mmap_sem.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
					   LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	ret = ext4_es_remove_extent(inode, punch_start,
				    EXT_MAX_BLOCKS - punch_start);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}
	ext4_discard_preallocations(inode);

	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
				     punch_stop - punch_start, SHIFT_LEFT);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	new_size = i_size_read(inode) - len;
	i_size_write(inode, new_size);
	EXT4_I(inode)->i_disksize = new_size;

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
out_mmap:
	up_write(&EXT4_I(inode)->i_mmap_sem);
	ext4_inode_resume_unlocked_dio(inode);
out_mutex:
	inode_unlock(inode);
	return ret;
}

/*
 * ext4_insert_range:
 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
 * The data blocks starting from @offset to the EOF are shifted by @len
 * towards right to create a hole in the @inode. Inode size is increased
 * by len bytes.
 * Returns 0 on success, error otherwise.
 */
int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	handle_t *handle;
	struct ext4_ext_path *path;
	struct ext4_extent *extent;
	ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
	unsigned int credits, ee_len;
	int ret = 0, depth, split_flag = 0;
	loff_t ioffset;

	/*
	 * We need to test this early because xfstests assumes that an
	 * insert range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support insert range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Insert range works only on fs block size aligned offsets. */
	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
		return -EINVAL;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	trace_ext4_insert_range(inode, offset, len);

	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);

	/* Call ext4_force_commit to flush all data in case of data=journal */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		if (ret)
			return ret;
	}

	inode_lock(inode);
	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Check for wrap through zero */
	if (inode->i_size + len > inode->i_sb->s_maxbytes) {
		ret = -EFBIG;
		goto out_mutex;
	}

	/* Offset should be less than i_size */
	if (offset >= i_size_read(inode)) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);
	/*
	 * Need to round down to align start offset to page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/* Write out all dirty pages */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
					   LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}

	/* Expand file to avoid data loss if there is an error while shifting */
	inode->i_size += len;
	EXT4_I(inode)->i_disksize += len;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = ext4_mark_inode_dirty(handle, inode);
	if (ret)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
	if (IS_ERR(path)) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	depth = ext_depth(inode);
	extent = path[depth].p_ext;
	if (extent) {
		ee_start_lblk = le32_to_cpu(extent->ee_block);
		ee_len = ext4_ext_get_actual_len(extent);

		/*
		 * If offset_lblk is not the starting block of the extent,
		 * split the extent at @offset_lblk
		 */
		if ((offset_lblk > ee_start_lblk) &&
		    (offset_lblk < (ee_start_lblk + ee_len))) {
			if (ext4_ext_is_unwritten(extent))
				split_flag = EXT4_EXT_MARK_UNWRIT1 |
					EXT4_EXT_MARK_UNWRIT2;
			ret = ext4_split_extent_at(handle, inode, &path,
					offset_lblk, split_flag,
					EXT4_EX_NOCACHE |
					EXT4_GET_BLOCKS_PRE_IO |
					EXT4_GET_BLOCKS_METADATA_NOFAIL);
		}
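
		/*
		 * For illustration: if the extent covers blocks [10, 20)
		 * and offset_lblk == 14, the split above leaves [10, 14)
		 * in place so that only [14, 20) takes part in the right
		 * shift below.
		 */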

		ext4_ext_drop_refs(path);
		kfree(path);
		if (ret < 0) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}
	} else {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	ret = ext4_es_remove_extent(inode, offset_lblk,
				    EXT_MAX_BLOCKS - offset_lblk);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	/*
	 * if offset_lblk lies in a hole which is at start of file, use
	 * ee_start_lblk to shift extents
	 */
	ret = ext4_ext_shift_extents(inode, handle,
		ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
		len_lblk, SHIFT_RIGHT);

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
out_mmap:
	up_write(&EXT4_I(inode)->i_mmap_sem);
	ext4_inode_resume_unlocked_dio(inode);
out_mutex:
	inode_unlock(inode);
	return ret;
}

/**
 * ext4_swap_extents - Swap extents between two inodes
 *
 * @inode1:	First inode
 * @inode2:	Second inode
 * @lblk1:	Start block for first inode
 * @lblk2:	Start block for second inode
 * @count:	Number of blocks to swap
 * @unwritten:	Mark second inode's extents as unwritten after swap
 * @erp:	Pointer to save error value
 *
 * This helper routine does exactly what it promises: swap extents. All other
 * stuff such as page-cache locking consistency, bh mapping consistency or
 * extent's data copying must be performed by caller.
 * Locking:
 *		i_mutex is held for both inodes
 *		i_data_sem is locked for write for both inodes
 * Assumptions:
 *		All pages from requested range are locked for both inodes
 */
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!inode_is_locked(inode1));
	BUG_ON(!inode_is_locked(inode2));

	*erp = ext4_es_remove_extent(inode1, lblk1, count);
	if (unlikely(*erp))
		return 0;
	*erp = ext4_es_remove_extent(inode2, lblk2, count);
	if (unlikely(*erp))
		return 0;

	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			path1 = NULL;
		finish:
			count = 0;
			goto repeat;
		}
		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			path2 = NULL;
			goto finish;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap ? */
		if (unlikely(!ex2 || !ex1))
			goto finish;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* if hole after extent, then go to next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If hole before extent, then shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			goto repeat;
		}
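
		/*
		 * Worked example for the skip above: if lblk1 sits in a
		 * hole whose next allocated block is 10 blocks away and
		 * lblk2 in one whose next is 7 blocks away, both cursors
		 * advance by max(10, 7) == 10 blocks (clamped to count)
		 * before the lookup is retried.
		 */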

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (e2_blk < lblk2) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			goto repeat;

		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;

		if (len != e1_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (len != e2_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2 + len, 0);
			if (*erp)
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			goto repeat;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto finish;

		/* Both extents are fully inside boundaries. Swap it now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);
		/*
		 * Looks scary, ah? The second inode already points to the
		 * new blocks and was successfully dirtied, but an error
		 * here can only be a journal error, in which case the whole
		 * transaction will be aborted anyway.
		 */
		if (unlikely(*erp))
			goto finish;
		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;

	repeat:
		ext4_ext_drop_refs(path1);
		kfree(path1);
		ext4_ext_drop_refs(path2);
		kfree(path2);
		path1 = path2 = NULL;
	}
	return replaced_count;
}