/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
			EXT4_SB(inode->i_sb)->s_journal,
			&EXT4_I(inode)->jinode,
			new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
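
/*
 * Illustrative note on the check above: a "fast" symlink stores its target
 * string directly in the inode's i_data area, so it owns no data blocks.
 * i_blocks is counted in 512-byte sectors, which is why the one possible
 * xattr block is converted with (s_blocksize >> 9) before being subtracted.
 */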

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
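
/*
 * Worked example (illustrative; assumes 4 KB blocks, so blocksize_bits = 12):
 * an inode with i_blocks == 8192 sectors covers 8192 >> (12 - 9) = 1024
 * filesystem blocks, so needed = 1024, which is then clamped to
 * EXT4_MAX_TRANS_DATA before EXT4_DATA_TRANS_BLOCKS() is added for the
 * roughly fixed per-transaction overhead (bitmaps, group descriptors,
 * inode and superblock updates).
 */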

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_get_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
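
/*
 * Illustrative caller pattern (a sketch, not code from this excerpt): the
 * truncate loop is expected to combine the two helpers above roughly as
 *
 *	if (try_to_extend_transaction(handle, inode)) {
 *		ext4_mark_inode_dirty(handle, inode);
 *		ext4_truncate_restart_trans(handle, inode,
 *					    blocks_for_truncate(inode));
 *	}
 *
 * i.e. everything must be consistently dirtied before the restart commits
 * the running transaction.
 */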

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data, ext4 uses a data structure
 * common for UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in the intermediate
 * nodes.  This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the offset
 * of the pointer to the (n+1)th node in the nth one.  If @i_block is out of
 * range (negative or too large), a warning is printed and zero is returned.
 *
 * Note: the function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
			     "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
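
/*
 * Worked example (illustrative; assumes 4 KB blocks, so ptrs = 1024 and
 * ptrs_bits = 10): logical block 11 is a direct block, giving depth 1 and
 * offsets = {11}; block 12 is the first indirect entry, giving depth 2 and
 * offsets = {EXT4_IND_BLOCK, 0}; block 12 + 1024 = 1036 is the first
 * double-indirect entry, giving depth 3 and
 * offsets = {EXT4_DIND_BLOCK, 0, 0}.
 */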

static int __ext4_check_blockref(const char *function, struct inode *inode,
				 __le32 *p, unsigned int max)
{
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			ext4_error(inode->i_sb, function,
				   "invalid block reference %u "
				   "in inode #%lu", blk, inode->i_ino);
			return -EIO;
		}
	}
	return 0;
}

#define ext4_check_indirect_blockref(inode, bh)			\
	__ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,	\
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)				\
	__ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,	\
			      EXT4_NDIR_BLOCKS)

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise.  Upon the return, chain[i].key contains
 * the number of the (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of the i-th indirect
 * block for i>0 and NULL for i==0.  In other words, it holds the block
 * numbers of the chain, the addresses they were taken from (where we can
 * verify that the chain did not change) and the buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon a zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Needs to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
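
/*
 * Illustrative outcome (assuming depth == 3, i.e. a double-indirect path):
 * for a fully mapped block the function returns NULL with chain[0..2].key
 * all non-zero; for a hole whose double-indirect block exists but whose
 * indirect slot is empty, chain[1].key == 0 and &chain[1] is returned with
 * *err == 0, telling the caller exactly where allocation must begin.
 */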

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near that
 *     block.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself?  OK, just put
	 * it into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
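
/*
 * Worked example of the colouring arithmetic (illustrative; assumes 4 KB
 * blocks, so EXT4_BLOCKS_PER_GROUP == 32768): for a task with pid 4242,
 * pid % 16 == 2, so colour = 2 * (32768 / 16) = 4096 and the goal becomes
 * bg_start + 4096 - sixteen concurrent writers are thus spread across
 * sixteen evenly spaced starting offsets within the group.
 */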

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 * ext4_blks_to_allocate: Look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so it is clear that the blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
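
/*
 * Worked example (illustrative): with k == 1 (one missing indirect block),
 * blks == 8 and blocks_to_boundary == 5, the request is clipped at the
 * boundary and the function returns 6; with k == 0 it instead walks
 * branch[0].p and stops at the first already-mapped slot, so a run of 3
 * free slots yields at most 3 even if more blocks were asked for.
 */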

/**
 * ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 * @indirect_blks: the number of blocks that need to be allocated for
 *		   indirect blocks
 * @new_blocks: on return it will store the new block numbers for
 *		the indirect blocks (if needed) and the first direct block
 * @blks: on return it will store the total number of allocated
 *	  direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
					 "requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
	return ret;
}
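
/*
 * Illustrative layout of the result (assuming indirect_blks == 2 and at
 * least one direct block was granted): new_blocks[0] and new_blocks[1] hold
 * the two freshly allocated indirect blocks, new_blocks[2] holds the first
 * direct block, and the return value is the number of direct blocks that
 * were actually allocated, which may be fewer than @blks.
 */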

/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into a chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode.  It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do.  We are calling it after
 * we had read the existing part of the chain and partial points to the last
 * triple of that (the one with zero ->key).  Upon exit we have the same
 * picture as after a successful ext4_get_block(), except that in one
 * place the chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
	for (i = 1; i <= n; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);

	return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to the new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just
	 * allocated direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}
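
/*
 * Illustrative effect of the splice (assuming num == 0 and blks == 4):
 * *where->p receives where->key, completing the chain in a single store,
 * and the three following slots where->p[1..3] are filled with the
 * consecutive block numbers key+1, key+2 and key+3.  Until that first
 * store the new branch is not reachable from the inode, which is what
 * keeps the operation safe against concurrent lookups.
 */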
  798. /*
  799. * The ext4_ind_get_blocks() function handles non-extents inodes
  800. * (i.e., using the traditional indirect/double-indirect i_blocks
  801. * scheme) for ext4_get_blocks().
  802. *
  803. * Allocation strategy is simple: if we have to allocate something, we will
  804. * have to go the whole way to leaf. So let's do it before attaching anything
  805. * to tree, set linkage between the newborn blocks, write them if sync is
  806. * required, recheck the path, free and repeat if check fails, otherwise
  807. * set the last missing link (that will protect us from any truncate-generated
  808. * removals - all blocks on the path are immune now) and possibly force the
  809. * write on the parent block.
  810. * That has a nice additional property: no special recovery from the failed
  811. * allocations is needed - we simply release blocks and do not touch anything
  812. * reachable from inode.
  813. *
  814. * `handle' can be NULL if create == 0.
  815. *
  816. * return > 0, # of blocks mapped or allocated.
  817. * return = 0, if plain lookup failed.
  818. * return < 0, error case.
  819. *
  820. * The ext4_ind_get_blocks() function should be called with
  821. * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
  822. * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
  823. * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
  824. * blocks.
  825. */
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
			       ext4_lblk_t iblock, unsigned int maxblocks,
			       struct buffer_head *bh_result,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}
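
/*
 * Illustrative caller sketch (not part of the original file): a plain
 * lookup of one block which, per the locking rules documented above,
 * needs only down_read(&EXT4_I(inode)->i_data_sem) and may pass a
 * NULL handle because create is not set:
 *
 *	struct buffer_head bh = { .b_state = 0, .b_blocknr = -1 };
 *	int n;
 *
 *	down_read(&EXT4_I(inode)->i_data_sem);
 *	n = ext4_ind_get_blocks(NULL, inode, iblock, 1, &bh, 0);
 *	up_read(&EXT4_I(inode)->i_data_sem);
 *
 * n > 0 means n blocks are mapped, n == 0 means a hole, n < 0 is an
 * error, matching the return convention spelled out above.
 */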
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif
/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a new block at @lblock for a non-extent-based file.
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode,
					      sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int dind_mask = EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1;
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = roundup_pow_of_two(lblock + 1);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
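
/*
 * Worked example (illustrative; assumes 4KiB blocks, so
 * EXT4_ADDR_PER_BLOCK == 1024, EXT4_ADDR_PER_BLOCK_BITS == 10, and
 * EXT4_NDIR_BLOCKS == 12): allocating logical block 12, the first
 * block that needs an indirect block, gives lblock == 0 after the
 * subtraction, blk_bits = roundup_pow_of_two(1) = 1, and a
 * reservation of 1/10 + 1 = 1 metadata block - the single indirect
 * block that must be allocated along with the data block.
 */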
/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_indirect_calc_metadata_amount(inode, lblock);
}
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int mdb_free = 0, allocated_meta_blocks = 0;

	spin_lock(&ei->i_block_reservation_lock);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	used += ei->i_allocated_meta_blocks;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	allocated_meta_blocks = ei->i_allocated_meta_blocks;
	ei->i_allocated_meta_blocks = 0;
	percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		mdb_free = ei->i_reserved_meta_blocks;
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem */
	if (quota_claim) {
		dquot_claim_block(inode, used);
		if (mdb_free)
			dquot_release_reservation_block(inode, mdb_free);
	} else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not update the quota for allocated blocks. But then
		 * converting an fallocate region to an initialized region
		 * would have caused a metadata allocation. So claim quota
		 * for that.
		 */
		if (allocated_meta_blocks)
			dquot_claim_block(inode, allocated_meta_blocks);
		dquot_release_reservation_block(inode, mdb_free + used);
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
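
/*
 * Illustrative walk-through (not part of the original file): suppose
 * an inode has i_reserved_data_blocks = 8 and i_reserved_meta_blocks
 * = 2, and writeback allocates all 8 data blocks plus 1 metadata
 * block (i_allocated_meta_blocks = 1).  With quota_claim set, the
 * function claims 8 + 1 = 9 blocks against quota, and because no
 * reserved data blocks remain it also releases the one still-unused
 * metadata reservation (mdb_free = 1) back to quota and subtracts it
 * from the dirty-blocks counter.
 */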
static int check_block_validity(struct inode *inode, const char *msg,
				sector_t logical, sector_t phys, int len)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
		ext4_error(inode->i_sb, msg,
			   "inode #%lu logical block %llu mapped to %llu "
			   "(size %d)", inode->i_ino,
			   (unsigned long long) logical,
			   (unsigned long long) phys, len);
		return -EIO;
	}
	return 0;
}
/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages)
				break;
		}
		pagevec_release(&pvec);
	}
	return num;
}
/*
 * The ext4_get_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates
 * blocks, stores the allocated blocks in the result buffer head, and
 * marks it mapped.
 *
 * If the file is extent-based, it calls ext4_ext_get_blocks();
 * otherwise it calls ext4_ind_get_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make
 * sure the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been
 * allocated); in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
		    unsigned int max_blocks, struct buffer_head *bh,
		    int flags)
{
	int retval;

	clear_buffer_mapped(bh);
	clear_buffer_unwritten(bh);

	ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, max_blocks,
		  (unsigned long)block);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, 0);
	} else {
		retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
					     bh, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && buffer_mapped(bh)) {
		int ret = check_block_validity(inode, "file system corruption",
					       block, bh->b_blocknr, retval);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_blocks() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && buffer_mapped(bh))
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of a uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	clear_buffer_unwritten(bh);

	/*
	 * New blocks allocated and/or writing to an uninitialized
	 * extent will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, flags);
	} else {
		retval = ext4_ind_get_blocks(handle, inode, block,
					     max_blocks, bh, flags);

		if (retval > 0 && buffer_new(bh)) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		EXT4_I(inode)->i_delalloc_reserved_flag = 0;

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && buffer_mapped(bh)) {
		int ret = check_block_validity(inode, "file system "
					       "corruption after allocation",
					       block, bh->b_blocknr, retval);
		if (ret != 0)
			return ret;
	}
	return retval;
}
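
/*
 * Illustrative call (not part of the original file): map up to 16
 * blocks at logical block 100, allocating if necessary; a positive
 * return value is the number of contiguous blocks now described by
 * the buffer head:
 *
 *	int n = ext4_get_blocks(handle, inode, 100, 16, bh,
 *				EXT4_GET_BLOCKS_CREATE);
 *	if (n > 0)
 *		bh->b_size = n << inode->i_blkbits;
 *
 * This mirrors what ext4_get_block() below does for each request.
 */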
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	int dio_credits;

	if (create && !handle) {
		/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
out:
	return ret;
}
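
/*
 * Sizing note (illustrative, assuming 4KiB blocks): the caller encodes
 * the request size in bh_result->b_size, so a 1MiB direct-IO request
 * arrives as b_size = 1048576, i.e. max_blocks = 1048576 >> 12 = 256
 * blocks, well under the DIO_MAX_BLOCKS cap of 4096 blocks (16MiB)
 * mapped per transaction.
 */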
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;
	int flags = 0;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	if (create)
		flags |= EXT4_GET_BLOCKS_CREATE;
	err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
	/*
	 * ext4_get_blocks() returns number of blocks mapped. 0 in
	 * case of a HOLE.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext4_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext4_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
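
/*
 * Typical use (illustrative, not from the original file): read an
 * existing block without allocating, distinguishing a hole (NULL
 * return with *err == 0) from a real I/O error (NULL with a negative
 * *err).  A NULL handle is allowed because create is zero:
 *
 *	int err = 0;
 *	struct buffer_head *bh = ext4_bread(NULL, inode, 0, 0, &err);
 *
 *	if (!bh && err)
 *		... report err ...
 */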
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
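
/*
 * Illustration (an assumption-labelled sketch, not original code):
 * `from' and `to' are byte offsets within the page, so on a 4KiB page
 * with 1KiB buffers,
 *
 *	walk_page_buffers(handle, page_buffers(page), 1024, 3072,
 *			  NULL, do_journal_get_write_access);
 *
 * applies the callback to the second and third buffer_heads only;
 * the first and fourth lie entirely outside [from, to) and are
 * skipped.
 */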
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext4_journal_get_write_access(handle, bh);
}
/*
 * Truncate blocks that were not used by write. We have to truncate the
 * pagecache as well so that corresponding buffers get properly unmapped.
 */
static void ext4_truncate_failed_write(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	ext4_truncate(inode);
}
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}
static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * less, we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}
	return ret ? ret : copied;
}
/*
 * Reserve a single block located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long md_needed, md_reserved;
	int ret;

	/*
	 * recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks
	 * worst case is one extent per block
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	md_reserved = ei->i_reserved_meta_blocks;
	md_needed = ext4_calc_metadata_amount(inode, lblock);
	spin_unlock(&ei->i_block_reservation_lock);

	/*
	 * Make quota reservation here to prevent quota overflow
	 * later. Real quota accounting is done at pages writeout
	 * time.
	 */
	ret = dquot_reserve_block(inode, md_needed + 1);
	if (ret)
		return ret;

	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
		dquot_release_reservation_block(inode, md_needed + 1);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		return -ENOSPC;
	}
	spin_lock(&ei->i_block_reservation_lock);
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
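
/*
 * Accounting sketch (illustrative): each call reserves md_needed + 1
 * blocks - the data block itself plus the metadata estimated by
 * ext4_calc_metadata_amount() - against both quota and the
 * filesystem's free-block count.  A buffered write dirtying one new
 * block whose metadata is already covered (md_needed == 0) therefore
 * reserves exactly one block.
 */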
static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks\n", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		to_free += ei->i_reserved_meta_blocks;
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty blocks counter */
	percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, to_free);
}
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);
	ext4_da_release_space(page->mapping->host, to_release);
}
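
/*
 * Illustration (an assumption-labelled sketch, not original code): on
 * a 4KiB page with four 1KiB buffers, invalidating from offset ==
 * 2048 releases the reservations of the third and fourth buffers
 * (curr_off 2048 and 3072) if they are delayed, while the first two
 * buffers keep theirs.
 */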
/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them with the writepage() callback
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated. this may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd)
{
	long pages_skipped;
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	BUG_ON(mpd->next_page <= mpd->first_page);
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			pages_skipped = mpd->wbc->pages_skipped;
			err = mapping->a_ops->writepage(page, mpd->wbc);
			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
				/*
				 * have successfully written the page
				 * without skipping the same
				 */
				mpd->pages_written++;
			/*
			 * In error case, we have to continue because
			 * remaining pages are still locked
			 * XXX: unlock and re-dirty them?
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	return ret;
}
/*
 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
 *
 * @mpd->inode - inode to walk through
 * @exbh->b_blocknr - first block on a disk
 * @exbh->b_size - amount of space in bytes
 * @logical - first logical block to start assignment with
 *
 * the function goes through all passed space and puts actual disk
 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
 */
static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
				 struct buffer_head *exbh)
{
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	int blocks = exbh->b_size >> inode->i_blkbits;
	sector_t pblock = exbh->b_blocknr, cur_logical;
	struct buffer_head *head, *bh;
	pgoff_t index, end;
	struct pagevec pvec;
	int nr_pages, i;

	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);

	while (index <= end) {
		/* XXX: optimize tail */
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			BUG_ON(!page_has_buffers(page));

			bh = page_buffers(page);
			head = bh;

			/* skip blocks out of the range */
			do {
				if (cur_logical >= logical)
					break;
				cur_logical++;
			} while ((bh = bh->b_this_page) != head);

			do {
				if (cur_logical >= logical + blocks)
					break;

				if (buffer_delay(bh) ||
				    buffer_unwritten(bh)) {

					BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);

					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					} else {
						/*
						 * unwritten already should
						 * have blocknr assigned.
						 * Verify that
						 */
						clear_buffer_unwritten(bh);
						BUG_ON(bh->b_blocknr != pblock);
					}

				} else if (buffer_mapped(bh))
					BUG_ON(bh->b_blocknr != pblock);

				cur_logical++;
				pblock++;
			} while ((bh = bh->b_this_page) != head);
		}
		pagevec_release(&pvec);
	}
}
/*
 * __unmap_underlying_blocks - just a helper function to unmap
 * set of blocks described by @bh
 */
static inline void __unmap_underlying_blocks(struct inode *inode,
					     struct buffer_head *bh)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	int blocks, i;

	blocks = bh->b_size >> inode->i_blkbits;
	for (i = 0; i < blocks; i++)
		unmap_underlying_metadata(bdev, bh->b_blocknr + i);
}
static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
					sector_t logical, long blk_cnt)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (logical + blk_cnt - 1) >>
				(PAGE_CACHE_SHIFT - inode->i_blkbits);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
	}
	return;
}
static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	printk(KERN_CRIT "Total free blocks count %lld\n",
	       ext4_count_free_blocks(inode->i_sb));
	printk(KERN_CRIT "Free/Dirty block details\n");
	printk(KERN_CRIT "free_blocks=%lld\n",
	       (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
	printk(KERN_CRIT "dirty_blocks=%lld\n",
	       (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
	printk(KERN_CRIT "Block reservation details\n");
	printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
	       EXT4_I(inode)->i_reserved_data_blocks);
	printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
	       EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}
/*
 * mpage_da_map_blocks - go through given space
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 *
 */
static int mpage_da_map_blocks(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct buffer_head new;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * We consider only non-mapped and non-allocated blocks
	 */
	if ((mpd->b_state & (1 << BH_Mapped)) &&
		!(mpd->b_state & (1 << BH_Delay)) &&
		!(mpd->b_state & (1 << BH_Unwritten)))
		return 0;

	/*
	 * If we didn't accumulate anything to write simply return
	 */
	if (!mpd->b_size)
		return 0;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_get_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path.  This
	 * affects functions in many different parts of the allocation
	 * call path.  This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_get_blocks()
	 * will set the magic i_delalloc_reserved_flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in question were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	new.b_state = 0;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
			       &new, get_blocks_flags);
	if (blks < 0) {
		err = blks;
		/*
		 * If get block returns with error we simply
		 * return. Later writepage will redirty the page and
		 * writepages will find the dirty page again
		 */
		if (err == -EAGAIN)
			return 0;

		if (err == -ENOSPC &&
		    ext4_count_free_blocks(mpd->inode->i_sb)) {
			mpd->retval = err;
			return 0;
		}

		/*
		 * get block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress. The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same.
		 */
		ext4_msg(mpd->inode->i_sb, KERN_CRIT,
			 "delayed block allocation failed for inode %lu at "
			 "logical offset %llu with max blocks %zd with "
			 "error %d\n", mpd->inode->i_ino,
			 (unsigned long long) next,
			 mpd->b_size >> mpd->inode->i_blkbits, err);
		printk(KERN_CRIT "This should not happen!! "
		       "Data will be lost\n");
		if (err == -ENOSPC) {
			ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd, next,
				mpd->b_size >> mpd->inode->i_blkbits);
		return err;
	}
	BUG_ON(blks == 0);

	new.b_size = (blks << mpd->inode->i_blkbits);

	if (buffer_new(&new))
		__unmap_underlying_blocks(mpd->inode, &new);

	/*
	 * If blocks are delayed marked, we need to
	 * put actual blocknr and drop delayed bit
	 */
	if ((mpd->b_state & (1 << BH_Delay)) ||
	    (mpd->b_state & (1 << BH_Unwritten)))
		mpage_put_bnr_to_bhs(mpd, next, &new);

	if (ext4_should_order_data(mpd->inode)) {
		err = ext4_jbd2_file_inode(handle, mpd->inode);
		if (err)
			return err;
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		return ext4_mark_inode_dirty(handle, mpd->inode);
	}
	return 0;
}
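
/*
 * Example of the disksize update above (illustrative, assuming 4KiB
 * blocks): mapping blocks [next = 10, next + blks = 15) produces a
 * candidate on-disk size of 15 << 12 = 61440 bytes; it is then
 * clamped to i_size and only written back if it actually exceeds the
 * current i_disksize.
 */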
#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		(1 << BH_Delay) | (1 << BH_Unwritten))
/*
 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * the function is used to collect contiguous blocks in the same state
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
{
	sector_t next;
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	/* check if the reserved journal credits might overflow */
	if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credit available.  Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks.  So limit nrblocks.
			 */
			goto flush_it;
		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
				EXT4_MAX_TRANS_DATA) {
			/*
			 * Adding the new buffer_head would make it cross the
			 * allowed limit for which we have journal credit
			 * reserved. So limit the new bh->b_size
			 */
			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
						mpd->inode->i_blkbits;
			/* we will do mpage_da_submit_io in the next loop */
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = b_size;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block to our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += b_size;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block to our extent, so we
	 * need to flush current extent and start new one
	 */
	if (mpage_da_map_blocks(mpd) == 0)
		mpage_da_submit_io(mpd);
	mpd->io_done = 1;
	return;
}
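
/*
 * Merge example (illustrative): with an accumulated extent starting
 * at b_blocknr = 100 and b_size covering 4 blocks, a new buffer at
 * logical == 104 whose (b_state & BH_FLAGS) matches mpd->b_state is
 * merged and b_size grows; a buffer at logical == 106, or one in a
 * different state, forces a flush of the current extent and starts a
 * new one.
 */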
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
/*
 * __mpage_da_writepage - finds extent of pages and blocks
 *
 * @page: page to consider
 * @wbc: not used, we just follow rules
 * @data: context
 *
 * The function finds extents of pages and scans them for all blocks.
 */
static int __mpage_da_writepage(struct page *page,
				struct writeback_control *wbc, void *data)
{
	struct mpage_da_data *mpd = data;
	struct inode *inode = mpd->inode;
	struct buffer_head *bh, *head;
	sector_t logical;

	if (mpd->io_done) {
		/*
		 * Rest of the page in the page_vec
		 * redirty them and skip them. We will
		 * try to write them again after
		 * starting a new transaction
		 */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return MPAGE_DA_EXTENT_TAIL;
	}
	/*
	 * Can we merge this page to current extent?
	 */
	if (mpd->next_page != page->index) {
		/*
		 * Nope, we can't. So, we map non-allocated blocks
		 * and start IO on them using writepage()
		 */
		if (mpd->next_page != mpd->first_page) {
			if (mpage_da_map_blocks(mpd) == 0)
				mpage_da_submit_io(mpd);
			/*
			 * skip rest of the page in the page_vec
			 */
			mpd->io_done = 1;
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return MPAGE_DA_EXTENT_TAIL;
		}

		/*
		 * Start next extent of pages ...
		 */
		mpd->first_page = page->index;

		/*
		 * ... and blocks
		 */
		mpd->b_size = 0;
		mpd->b_state = 0;
		mpd->b_blocknr = 0;
	}

	mpd->next_page = page->index + 1;
	logical = (sector_t) page->index <<
		  (PAGE_CACHE_SHIFT - inode->i_blkbits);

	if (!page_has_buffers(page)) {
		mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
				       (1 << BH_Dirty) | (1 << BH_Uptodate));
		if (mpd->io_done)
			return MPAGE_DA_EXTENT_TAIL;
	} else {
		/*
		 * Page with regular buffer heads, just add all dirty ones
		 */
		head = page_buffers(page);
		bh = head;
		do {
			BUG_ON(buffer_locked(bh));
			/*
			 * We need to try to allocate
			 * unmapped blocks in the same page.
			 * Otherwise we won't make progress
			 * with the page in ext4_writepage
			 */
			if (ext4_bh_delay_or_unwritten(NULL, bh)) {
				mpage_add_bh_to_extent(mpd, logical,
						       bh->b_size,
						       bh->b_state);
				if (mpd->io_done)
					return MPAGE_DA_EXTENT_TAIL;
			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
				/*
				 * mapped dirty buffer. We need to update
				 * the b_state because we look at
				 * b_state in mpage_da_map_blocks.  We don't
				 * update b_size because if we find an
				 * unmapped buffer_head later we need to
				 * use the b_state flag of that buffer_head.
				 */
				if (mpd->b_size == 0)
					mpd->b_state = bh->b_state & BH_FLAGS;
			}
			logical++;
		} while ((bh = bh->b_this_page) != head);
	}

	return 0;
}
/*
 * This is a special get_blocks_t callback which is used by
 * ext4_da_write_begin().  It will either return mapped block or
 * reserve space for a single block.
 *
 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
 * initialized properly.
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create)
{
	int ret = 0;
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	BUG_ON(create == 0);
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);

	/*
	 * first, we need to know whether the block is allocated already;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
	if ((ret == 0) && !buffer_delay(bh_result)) {
		/* the block isn't (pre)allocated yet, let's reserve space */
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		ret = ext4_da_reserve_space(inode, iblock);
		if (ret)
			/* not enough space to reserve */
			return ret;

		map_bh(bh_result, inode->i_sb, invalid_block);
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
	} else if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		if (buffer_unwritten(bh_result)) {
			/* A delayed write to unwritten bh should
			 * be marked new and mapped.  Mapped ensures
			 * that we don't do get_block multiple times
			 * when we write to the same offset and new
			 * ensures that we do proper zero out for
			 * partial write.
			 */
			set_buffer_new(bh_result);
			set_buffer_mapped(bh_result);
		}
		ret = 0;
	}

	return ret;
}
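
/*
 * Sentinel illustration (assumptions, not original code): on a 64-bit
 * sector_t, ~((sector_t) 0xffff) is 0xffffffffffff0000, a block
 * number far beyond any real allocation; if the filesystem were
 * somehow large enough for that to be a valid block, the fallback of
 * ~0 is used instead.  Delayed buffers are mapped to this sentinel so
 * that nothing mistakes them for buffers with a real disk address.
 */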
/*
 * This function is used as a standard get_block_t callback function
 * when there is no desire to allocate any blocks.  It is used as a
 * callback function for block_prepare_write(), nobh_writepage(), and
 * block_write_full_page().  These functions should only try to map a
 * single block at a time.
 *
 * Since this function doesn't do block allocations even if the caller
 * requests it by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads returned by
 * this function are either all already mapped or marked for delayed
 * allocation before calling nobh_writepage() or
 * block_write_full_page().  Otherwise, b_blocknr could be left
 * uninitialized, and the page write functions will be taken by
 * surprise.
 */
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int ret = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);

	/*
	 * we don't want to do block allocation in writepage
	 * so call get_block_wrap with create = 0
	 */
	ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	return ret;
}
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}
static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	page_bufs = page_buffers(page);
	BUG_ON(!page_bufs);
	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
	/* As soon as we unlock the page, it can go away, but we have
	 * references to buffers so we are safe */
	unlock_page(page);

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				do_journal_get_write_access);

	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				write_end_fn);
	if (ret == 0)
		ret = err;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
out:
	return ret;
}
/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start transaction directly because transaction start ranks above page
 * lock so we have to do some magic.
 *
 * This function can get called via...
 *   - ext4_da_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via pdflush (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function. If we have page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmaped based write. So if we do with blocksize 1K
 * truncate(f, 1024);
 * a = mmap(f, 0, 4096);
 * a[0] = 'a';
 * truncate(f, 4096);
 * we have in the page first buffer_head mapped via page_mkwrite call back
 * but other buffer_heads would be unmapped but dirty (dirty done via the
 * do_wp_page). So writepage should write the first block. If we modify
 * the mmap area beyond 1024 we will again get a page_fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that is either delay or
 * unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has the dirty flag cleared so we don't get recursive
 * page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs;
	struct inode *inode = page->mapping->host;

	trace_ext4_writepage(inode, page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		page_bufs = page_buffers(page);
		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
					ext4_bh_delay_or_unwritten)) {
			/*
			 * We don't want to do block allocation,
			 * so redirty the page and return.
			 * We may reach here when we do a journal commit
			 * via journal_submit_inode_data_buffers.
			 * If we don't have mapping block we just ignore
			 * them. We can also reach here via shrink_page_list
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	} else {
		/*
		 * The test for page_has_buffers() is subtle:
		 * We know the page is dirty but it lost buffers. That means
		 * that at some moment in time after write_begin()/write_end()
		 * has been called all buffers have been clean and thus they
		 * must have been written at least once. So they are all
		 * mapped and we can happily proceed with mapping them
		 * and writing the page.
		 *
		 * Try to initialize the buffer_heads and check whether
		 * all are mapped and non delay. We don't want to
		 * do block allocation here.
		 */
		ret = block_prepare_write(page, 0, len,
					  noalloc_get_block_write);
		if (!ret) {
			page_bufs = page_buffers(page);
			/* check whether all are mapped and non delay */
			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
						ext4_bh_delay_or_unwritten)) {
				redirty_page_for_writepage(wbc, page);
				unlock_page(page);
				return 0;
			}
		} else {
			/*
			 * We can't do block allocation here,
			 * so just redirty the page and unlock
			 * and return
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
		/* now mark the buffer_heads as dirty and uptodate */
		block_commit_write(page, 0, len);
	}

	if (PageChecked(page) && ext4_should_journal_data(inode)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		return __ext4_journalled_writepage(page, len);
	}

	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
		ret = nobh_writepage(page, noalloc_get_block_write, wbc);
	else
		ret = block_write_full_page(page, noalloc_get_block_write,
					    wbc);

	return ret;
}

/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction;
 * ext4_da_writepages() will loop calling this before
 * the block allocation.
 */
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
        int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;

        /*
         * With non-extent format the journal credit needed to
         * insert nrblocks contiguous blocks is dependent on
         * the number of contiguous blocks. So we will limit
         * the number of contiguous blocks to a sane value.
         */
        if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
            (max_blocks > EXT4_MAX_TRANS_DATA))
                max_blocks = EXT4_MAX_TRANS_DATA;

        return ext4_chunk_trans_blocks(inode, max_blocks);
}
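
/*
 * Rough worked example (illustrative numbers only): with 64
 * delalloc-reserved data blocks on an extent-mapped inode, we ask
 * ext4_chunk_trans_blocks(inode, 64) for enough credits to insert one
 * 64-block extent plus the associated bitmap, group descriptor and
 * superblock updates; a non-extent inode would first have max_blocks
 * clamped to EXT4_MAX_TRANS_DATA.
 */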
static int ext4_da_writepages(struct address_space *mapping,
                              struct writeback_control *wbc)
{
        pgoff_t index;
        int range_whole = 0;
        handle_t *handle = NULL;
        struct mpage_da_data mpd;
        struct inode *inode = mapping->host;
        int no_nrwrite_index_update;
        int pages_written = 0;
        long pages_skipped;
        unsigned int max_pages;
        int range_cyclic, cycled = 1, io_done = 0;
        int needed_blocks, ret = 0;
        long desired_nr_to_write, nr_to_writebump = 0;
        loff_t range_start = wbc->range_start;
        struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

        trace_ext4_da_writepages(inode, wbc);

        /*
         * No pages to write? This is mainly a kludge to avoid starting
         * a transaction for special inodes like the journal inode on last
         * iput() because that could violate lock ordering on umount.
         */
        if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        /*
         * If the filesystem has aborted, it is read-only, so return
         * right away instead of dumping stack traces later on that
         * will obscure the real source of the problem. We test
         * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
         * the latter could be true if the filesystem is mounted
         * read-only, and in that case, ext4_da_writepages should
         * *never* be called, so if that ever happens, we would want
         * the stack trace.
         */
        if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
                return -EROFS;

        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                range_whole = 1;

        range_cyclic = wbc->range_cyclic;
        if (wbc->range_cyclic) {
                index = mapping->writeback_index;
                if (index)
                        cycled = 0;
                wbc->range_start = index << PAGE_CACHE_SHIFT;
                wbc->range_end = LLONG_MAX;
                wbc->range_cyclic = 0;
        } else
                index = wbc->range_start >> PAGE_CACHE_SHIFT;

        /*
         * This works around two forms of stupidity. The first is in
         * the writeback code, which caps the maximum number of pages
         * written to be 1024 pages. This is wrong on multiple
         * levels; different architectures have a different page size,
         * which changes the maximum amount of data which gets
         * written. Secondly, 4 megabytes is way too small. XFS
         * forces this value to be 16 megabytes by multiplying the
         * nr_to_write parameter by four, and then relies on its
         * allocator to allocate larger extents to make them
         * contiguous. Unfortunately this brings us to the second
         * stupidity, which is that ext4's mballoc code only allocates
         * at most 2048 blocks. So we force contiguous writes up to
         * the number of dirty blocks in the inode, or
         * sbi->s_max_writeback_mb_bump, whichever is smaller.
         */
        max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
        if (!range_cyclic && range_whole)
                desired_nr_to_write = wbc->nr_to_write * 8;
        else
                desired_nr_to_write = ext4_num_dirty_pages(inode, index,
                                                           max_pages);
        if (desired_nr_to_write > max_pages)
                desired_nr_to_write = max_pages;

        if (wbc->nr_to_write < desired_nr_to_write) {
                nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
                wbc->nr_to_write = desired_nr_to_write;
        }

        mpd.wbc = wbc;
        mpd.inode = mapping->host;

        /*
         * we don't want write_cache_pages to update
         * nr_to_write and writeback_index
         */
        no_nrwrite_index_update = wbc->no_nrwrite_index_update;
        wbc->no_nrwrite_index_update = 1;
        pages_skipped = wbc->pages_skipped;

retry:
        while (!ret && wbc->nr_to_write > 0) {

                /*
                 * We insert one extent at a time, so we need
                 * the credits for a single extent allocation.
                 * Journalled mode is currently not supported
                 * by delalloc.
                 */
                BUG_ON(ext4_should_journal_data(inode));
                needed_blocks = ext4_da_writepages_trans_blocks(inode);

                /* start a new transaction */
                handle = ext4_journal_start(inode, needed_blocks);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
                                 "%ld pages, ino %lu; err %d\n", __func__,
                                 wbc->nr_to_write, inode->i_ino, ret);
                        goto out_writepages;
                }

                /*
                 * Now call __mpage_da_writepage to find the next
                 * contiguous region of logical blocks that need
                 * blocks to be allocated by ext4. We don't actually
                 * submit the blocks for I/O here, even though
                 * write_cache_pages thinks it will, and will set the
                 * pages as clean for write before calling
                 * __mpage_da_writepage().
                 */
                mpd.b_size = 0;
                mpd.b_state = 0;
                mpd.b_blocknr = 0;
                mpd.first_page = 0;
                mpd.next_page = 0;
                mpd.io_done = 0;
                mpd.pages_written = 0;
                mpd.retval = 0;
                ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
                                        &mpd);
                /*
                 * If we have a contiguous extent of pages and we
                 * haven't done the I/O yet, map the blocks and submit
                 * them for I/O.
                 */
                if (!mpd.io_done && mpd.next_page != mpd.first_page) {
                        if (mpage_da_map_blocks(&mpd) == 0)
                                mpage_da_submit_io(&mpd);
                        mpd.io_done = 1;
                        ret = MPAGE_DA_EXTENT_TAIL;
                }
                trace_ext4_da_write_pages(inode, &mpd);
                wbc->nr_to_write -= mpd.pages_written;

                ext4_journal_stop(handle);

                if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
                        /*
                         * Commit the transaction, which would free the
                         * blocks released in the transaction, and try
                         * again.
                         */
                        jbd2_journal_force_commit_nested(sbi->s_journal);
                        wbc->pages_skipped = pages_skipped;
                        ret = 0;
                } else if (ret == MPAGE_DA_EXTENT_TAIL) {
                        /*
                         * Got one extent; now try with the
                         * rest of the pages.
                         */
                        pages_written += mpd.pages_written;
                        wbc->pages_skipped = pages_skipped;
                        ret = 0;
                        io_done = 1;
                } else if (wbc->nr_to_write)
                        /*
                         * There is no more writeout needed,
                         * or we requested a nonblocking writeout
                         * and we found the device congested.
                         */
                        break;
        }
        if (!io_done && !cycled) {
                cycled = 1;
                index = 0;
                wbc->range_start = index << PAGE_CACHE_SHIFT;
                wbc->range_end = mapping->writeback_index - 1;
                goto retry;
        }
        if (pages_skipped != wbc->pages_skipped)
                ext4_msg(inode->i_sb, KERN_CRIT,
                         "This should not happen leaving %s "
                         "with nr_to_write = %ld ret = %d\n",
                         __func__, wbc->nr_to_write, ret);

        /* Update index */
        index += pages_written;
        wbc->range_cyclic = range_cyclic;
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                /*
                 * Set the writeback_index so that range_cyclic
                 * mode will write it back later.
                 */
                mapping->writeback_index = index;

out_writepages:
        if (!no_nrwrite_index_update)
                wbc->no_nrwrite_index_update = 0;
        wbc->nr_to_write -= nr_to_writebump;
        wbc->range_start = range_start;
        trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
        return ret;
}

#define FALL_BACK_TO_NONDELALLOC 1
static int ext4_nonda_switch(struct super_block *sb)
{
        s64 free_blocks, dirty_blocks;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        /*
         * Switch to non-delalloc mode if we are running low
         * on free blocks. The free block accounting via percpu
         * counters can get slightly wrong with percpu_counter_batch getting
         * accumulated on each CPU without updating global counters.
         * Delalloc needs an accurate free block accounting, so switch
         * to non-delalloc when we are near the error range.
         */
        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
        if (2 * free_blocks < 3 * dirty_blocks ||
            free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
                /*
                 * The free block count is less than 150% of the dirty
                 * blocks, or free blocks are below the watermark.
                 */
                return 1;
        }
        /*
         * Even if we don't switch but are nearing capacity,
         * start pushing delalloc when 1/2 of free blocks are dirty.
         */
        if (free_blocks < 2 * dirty_blocks)
                writeback_inodes_sb_if_idle(sb);

        return 0;
}
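
/*
 * Worked example of the thresholds above (hypothetical counter values):
 * with free_blocks = 1000 and dirty_blocks = 700, 2*1000 < 3*700, so
 * free is below 150% of dirty and we fall back to non-delalloc. With
 * dirty_blocks = 600 (and the watermark check also passing) we stay in
 * delalloc, but since 1000 < 2*600 we kick writeback_inodes_sb_if_idle()
 * to start pushing the delalloc blocks out in the background.
 */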
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        int ret, retries = 0, quota_retries = 0;
        struct page *page;
        pgoff_t index;
        unsigned from, to;
        struct inode *inode = mapping->host;
        handle_t *handle;

        index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

        if (ext4_nonda_switch(inode->i_sb)) {
                *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
                return ext4_write_begin(file, mapping, pos,
                                        len, flags, pagep, fsdata);
        }
        *fsdata = (void *)0;
        trace_ext4_da_write_begin(inode, pos, len, flags);
retry:
        /*
         * With delayed allocation, we don't log the i_disksize update
         * if there is delayed block allocation. But we still need
         * to journal the i_disksize update if a write to the end of
         * the file hits an already mapped buffer.
         */
        handle = ext4_journal_start(inode, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }
        /* We cannot recurse into the filesystem as the transaction is already
         * started */
        flags |= AOP_FLAG_NOFS;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                ext4_journal_stop(handle);
                ret = -ENOMEM;
                goto out;
        }
        *pagep = page;

        ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                ext4_da_get_block_prep);
        if (ret < 0) {
                unlock_page(page);
                ext4_journal_stop(handle);
                page_cache_release(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size. Trim these off again. We don't need
                 * i_size_read because we hold i_mutex.
                 */
                if (pos + len > inode->i_size)
                        ext4_truncate_failed_write(inode);
        }

        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;

        if ((ret == -EDQUOT) &&
            EXT4_I(inode)->i_reserved_meta_blocks &&
            (quota_retries++ < 3)) {
                /*
                 * Since we often over-estimate the number of metadata
                 * blocks required, we may sometimes get a spurious
                 * out-of-quota error even though there would be enough
                 * space once we write the data blocks and find out how
                 * many metadata blocks were _really_ required. So try
                 * forcing the inode write to see if that helps.
                 */
                write_inode_now(inode, (quota_retries == 3));
                goto retry;
        }
out:
        return ret;
}

/*
 * Check whether we should update i_disksize when a write to the end of
 * the file does not require block allocation.
 */
static int ext4_da_should_update_i_disksize(struct page *page,
                                            unsigned long offset)
{
        struct buffer_head *bh;
        struct inode *inode = page->mapping->host;
        unsigned int idx;
        int i;

        bh = page_buffers(page);
        idx = offset >> inode->i_blkbits;

        for (i = 0; i < idx; i++)
                bh = bh->b_this_page;

        if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
                return 0;
        return 1;
}
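
/*
 * For example, with a 1K block size and offset == 3000, idx is
 * 3000 >> 10 == 2, so the walk above lands on the third buffer_head of
 * the page; i_disksize is then only updated if that buffer is mapped
 * and neither delayed nor unwritten.
 */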
static int ext4_da_write_end(struct file *file,
                             struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned copied,
                             struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
        handle_t *handle = ext4_journal_current_handle();
        loff_t new_i_size;
        unsigned long start, end;
        int write_mode = (int)(unsigned long)fsdata;

        if (write_mode == FALL_BACK_TO_NONDELALLOC) {
                if (ext4_should_order_data(inode)) {
                        return ext4_ordered_write_end(file, mapping, pos,
                                        len, copied, page, fsdata);
                } else if (ext4_should_writeback_data(inode)) {
                        return ext4_writeback_write_end(file, mapping, pos,
                                        len, copied, page, fsdata);
                } else {
                        BUG();
                }
        }

        trace_ext4_da_write_end(inode, pos, len, copied);
        start = pos & (PAGE_CACHE_SIZE - 1);
        end = start + copied - 1;

        /*
         * generic_write_end() will run mark_inode_dirty() if i_size
         * changes. So let's piggyback the i_disksize mark_inode_dirty
         * into that.
         */
        new_i_size = pos + copied;
        if (new_i_size > EXT4_I(inode)->i_disksize) {
                if (ext4_da_should_update_i_disksize(page, end)) {
                        down_write(&EXT4_I(inode)->i_data_sem);
                        if (new_i_size > EXT4_I(inode)->i_disksize) {
                                /*
                                 * Updating i_disksize when extending the
                                 * file without needing block allocation.
                                 */
                                if (ext4_should_order_data(inode))
                                        ret = ext4_jbd2_file_inode(handle,
                                                                   inode);

                                EXT4_I(inode)->i_disksize = new_i_size;
                        }
                        up_write(&EXT4_I(inode)->i_data_sem);
                        /* We need to mark the inode dirty even if
                         * new_i_size is less than inode->i_size but
                         * greater than i_disksize (hint: delalloc).
                         */
                        ext4_mark_inode_dirty(handle, inode);
                }
        }
        ret2 = generic_write_end(file, mapping, pos, len, copied,
                                 page, fsdata);
        copied = ret2;
        if (ret2 < 0)
                ret = ret2;
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;

        return ret ? ret : copied;
}

static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
        /*
         * Drop reserved blocks
         */
        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                goto out;

        ext4_da_page_release_reservation(page, offset);

out:
        ext4_invalidatepage(page, offset);

        return;
}

/*
 * Force all delayed allocation blocks to be allocated for a given inode.
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
        trace_ext4_alloc_da_blocks(inode);

        if (!EXT4_I(inode)->i_reserved_data_blocks &&
            !EXT4_I(inode)->i_reserved_meta_blocks)
                return 0;

        /*
         * We do something simple for now. The filemap_flush() will
         * also start triggering a write of the data blocks, which is
         * not strictly speaking necessary (and for users of
         * laptop_mode, not even desirable). However, to do otherwise
         * would require replicating code paths in:
         *
         * ext4_da_writepages() ->
         *    write_cache_pages() ---> (via passed in callback function)
         *       __mpage_da_writepage() -->
         *          mpage_add_bh_to_extent()
         *          mpage_da_map_blocks()
         *
         * The problem is that write_cache_pages(), located in
         * mm/page-writeback.c, marks pages clean in preparation for
         * doing I/O, which is not desirable if we're not planning on
         * doing I/O at all.
         *
         * We could call write_cache_pages(), and then redirty all of
         * the pages by calling redirty_page_for_writepage() but that
         * would be ugly in the extreme. So instead we would need to
         * replicate parts of the code in the above functions,
         * simplifying them because we wouldn't actually intend to
         * write out the pages, but rather only collect contiguous
         * logical block extents, call the multi-block allocator, and
         * then update the buffer heads with the block allocations.
         *
         * For now, though, we'll cheat by calling filemap_flush(),
         * which will map the blocks, and start the I/O, but not
         * actually wait for the I/O to complete.
         */
        return filemap_flush(inode->i_mapping);
}
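
/*
 * Informative note (the callers live outside this file): one path into
 * this helper in trees of this vintage is the EXT4_IOC_ALLOC_DA_BLKS
 * ioctl, which lets userspace force allocation of all delalloc blocks
 * on a file, roughly:
 *
 *	fd = open("somefile", O_RDONLY);
 *	ioctl(fd, EXT4_IOC_ALLOC_DA_BLKS);
 */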

/*
 * bmap() is special. It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal. If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        journal_t *journal;
        int err;

        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
            test_opt(inode->i_sb, DELALLOC)) {
                /*
                 * With delalloc we want to sync the file
                 * so that we can make sure we allocate
                 * blocks for the file.
                 */
                filemap_write_and_wait(mapping);
        }

        if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
                /*
                 * This is a REALLY heavyweight approach, but the use of
                 * bmap on dirty files is expected to be extremely rare:
                 * only if we run lilo or swapon on a freshly made file
                 * do we expect this to happen.
                 *
                 * (bmap requires CAP_SYS_RAWIO so this does not
                 * represent an unprivileged user DOS attack --- we'd be
                 * in trouble if mortal users could trigger this path at
                 * will.)
                 *
                 * NB. EXT4_STATE_JDATA is not set on files other than
                 * regular files. If somebody wants to bmap a directory
                 * or symlink and gets confused because the buffer
                 * hasn't yet been flushed to disk, they deserve
                 * everything they get.
                 */
                EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
                journal = EXT4_JOURNAL(inode);
                jbd2_journal_lock_updates(journal);
                err = jbd2_journal_flush(journal);
                jbd2_journal_unlock_updates(journal);

                if (err)
                        return 0;
        }

        return generic_block_bmap(mapping, block, ext4_get_block);
}
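
/*
 * Sketch of how this path is exercised from userspace (illustrative,
 * not part of ext4): the FIBMAP ioctl, which tools like lilo use,
 * translates a logical block number within a file into a physical
 * block number; it requires CAP_SYS_RAWIO:
 *
 *	int blk = 0;
 *	if (ioctl(fd, FIBMAP, &blk) == 0)
 *		printf("block 0 lives at physical block %d\n", blk);
 */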

static int ext4_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, ext4_get_block);
}

static int
ext4_readpages(struct file *file, struct address_space *mapping,
               struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
        journal_t *journal = EXT4_JOURNAL(page->mapping->host);

        /*
         * If it's a full truncate we just forget about the pending dirtying
         */
        if (offset == 0)
                ClearPageChecked(page);

        if (journal)
                jbd2_journal_invalidatepage(journal, page, offset);
        else
                block_invalidatepage(page, offset);
}

static int ext4_releasepage(struct page *page, gfp_t wait)
{
        journal_t *journal = EXT4_JOURNAL(page->mapping->host);

        WARN_ON(PageChecked(page));
        if (!page_has_buffers(page))
                return 0;
        if (journal)
                return jbd2_journal_try_to_free_buffers(journal, page, wait);
        else
                return try_to_free_buffers(page);
}

/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list. So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into the buffered path in that case so we are safe.
 */
static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
                                  const struct iovec *iov, loff_t offset,
                                  unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct ext4_inode_info *ei = EXT4_I(inode);
        handle_t *handle;
        ssize_t ret;
        int orphan = 0;
        size_t count = iov_length(iov, nr_segs);
        int retries = 0;

        if (rw == WRITE) {
                loff_t final_size = offset + count;

                if (final_size > inode->i_size) {
                        /* Credits for sb + inode write */
                        handle = ext4_journal_start(inode, 2);
                        if (IS_ERR(handle)) {
                                ret = PTR_ERR(handle);
                                goto out;
                        }
                        ret = ext4_orphan_add(handle, inode);
                        if (ret) {
                                ext4_journal_stop(handle);
                                goto out;
                        }
                        orphan = 1;
                        ei->i_disksize = inode->i_size;
                        ext4_journal_stop(handle);
                }
        }

retry:
        ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext4_get_block, NULL);
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;

        if (orphan) {
                int err;

                /* Credits for sb + inode write */
                handle = ext4_journal_start(inode, 2);
                if (IS_ERR(handle)) {
                        /* This is really bad luck. We've written the data
                         * but cannot extend i_size. Bail out and pretend
                         * the write failed... */
                        ret = PTR_ERR(handle);
                        goto out;
                }
                if (inode->i_nlink)
                        ext4_orphan_del(handle, inode);
                if (ret > 0) {
                        loff_t end = offset + ret;
                        if (end > inode->i_size) {
                                ei->i_disksize = end;
                                i_size_write(inode, end);
                                /*
                                 * We're going to return a positive `ret'
                                 * here due to non-zero-length I/O, so there's
                                 * no way of reporting error returns from
                                 * ext4_mark_inode_dirty() to userspace. So
                                 * ignore it.
                                 */
                                ext4_mark_inode_dirty(handle, inode);
                        }
                }
                err = ext4_journal_stop(handle);
                if (ret == 0)
                        ret = err;
        }
out:
        return ret;
}

static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
                                    struct buffer_head *bh_result, int create)
{
        handle_t *handle = NULL;
        int ret = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
        int dio_credits;

        ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
                   inode->i_ino, create);
        /*
         * The DIO VFS code passes create = 0 for writes to the middle
         * of the file. It does this to avoid block allocation for
         * holes, to prevent exposing stale data when there is a
         * parallel buffered read (which does not hold the i_mutex
         * lock) while the direct IO write has not completed. DIO
         * requests on holes finally fall back to buffered IO for this
         * reason.
         *
         * For ext4 extent-based files, since we support fallocate and
         * newly allocated extents are marked uninitialized, we can
         * fallocate blocks for holes; a parallel buffered read will
         * then zero out the page when it reads a hole for which the
         * parallel DIO write has not yet completed.
         *
         * When we come here, we know it's a direct IO write to the
         * middle of the file (< i_size), so it's safe to override the
         * create flag from the VFS.
         */
        create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;

        if (max_blocks > DIO_MAX_BLOCKS)
                max_blocks = DIO_MAX_BLOCKS;
        dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
        handle = ext4_journal_start(inode, dio_credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }
        ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
                              create);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
        }
        ext4_journal_stop(handle);
out:
        return ret;
}

static void ext4_free_io_end(ext4_io_end_t *io)
{
        BUG_ON(!io);
        iput(io->inode);
        kfree(io);
}

static void dump_aio_dio_list(struct inode *inode)
{
#ifdef EXT4_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;

        if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
                ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
                return;
        }

        ext4_debug("Dump inode %lu aio_dio_completed_IO list\n", inode->i_ino);
        list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                           io, inode->i_ino, io0, io1);
        }
#endif
}

/*
 * Check a range of space and convert unwritten extents to written.
 */
static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
{
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        size_t size = io->size;
        int ret = 0;

        ext4_debug("end_aio_dio_nolock: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);

        if (list_empty(&io->list))
                return ret;

        if (io->flag != DIO_AIO_UNWRITTEN)
                return ret;

        if (offset + size <= i_size_read(inode))
                ret = ext4_convert_unwritten_extents(inode, offset, size);

        if (ret < 0) {
                printk(KERN_EMERG "%s: failed to convert unwritten "
                       "extents to written extents, error is %d; "
                       "io is still on inode %lu aio dio list\n",
                       __func__, ret, inode->i_ino);
                return ret;
        }

        /* clear the DIO AIO unwritten flag */
        io->flag = 0;
        return ret;
}

/*
 * Work on completed aio dio IO, to convert unwritten extents to
 * written extents.
 */
static void ext4_end_aio_dio_work(struct work_struct *work)
{
        ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
        struct inode *inode = io->inode;
        int ret = 0;

        mutex_lock(&inode->i_mutex);
        ret = ext4_end_aio_dio_nolock(io);
        if (ret >= 0) {
                if (!list_empty(&io->list))
                        list_del_init(&io->list);
                ext4_free_io_end(io);
        }
        mutex_unlock(&inode->i_mutex);
}

/*
 * This function is called from ext4_sync_file().
 *
 * When AIO DIO IO is completed, the work to convert unwritten
 * extents to written is queued on a workqueue but may not get
 * immediately scheduled. When fsync is called, we need to ensure the
 * conversion is complete before fsync returns.
 * The inode keeps track of a list of completed AIO from the DIO path
 * that might need the conversion. This function walks through
 * the list and converts the related unwritten extents to written.
 */
int flush_aio_dio_completed_IO(struct inode *inode)
{
        ext4_io_end_t *io;
        int ret = 0;
        int ret2 = 0;

        if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
                return ret;

        dump_aio_dio_list(inode);
        while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
                io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
                                ext4_io_end_t, list);
                /*
                 * Calling ext4_end_aio_dio_nolock() to convert completed
                 * IO to written.
                 *
                 * When ext4_sync_file() is called, run_queue() may already
                 * be about to flush the work corresponding to this io
                 * structure. It will be upset if it finds that the io
                 * structure related to the work to be scheduled has been
                 * freed.
                 *
                 * Thus we need to keep the io structure still valid here
                 * after the conversion has finished. The io structure has
                 * a flag to avoid double conversion from both fsync and the
                 * background work queue.
                 */
                ret = ext4_end_aio_dio_nolock(io);
                if (ret < 0)
                        ret2 = ret;
                else
                        list_del_init(&io->list);
        }
        return (ret2 < 0) ? ret2 : 0;
}

static ext4_io_end_t *ext4_init_io_end(struct inode *inode)
{
        ext4_io_end_t *io = NULL;

        io = kmalloc(sizeof(*io), GFP_NOFS);

        if (io) {
                igrab(inode);
                io->inode = inode;
                io->flag = 0;
                io->offset = 0;
                io->size = 0;
                io->error = 0;
                INIT_WORK(&io->work, ext4_end_aio_dio_work);
                INIT_LIST_HEAD(&io->list);
        }

        return io;
}

static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private)
{
        ext4_io_end_t *io_end = iocb->private;
        struct workqueue_struct *wq;

        /* if not async direct IO or dio with 0 bytes write, just return */
        if (!io_end || !size)
                return;

        ext_debug("ext4_end_io_dio(): io_end 0x%p"
                  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
                  iocb->private, io_end->inode->i_ino, iocb, offset,
                  size);

        /* if not aio dio with unwritten extents, just free io and return */
        if (io_end->flag != DIO_AIO_UNWRITTEN) {
                ext4_free_io_end(io_end);
                iocb->private = NULL;
                return;
        }

        io_end->offset = offset;
        io_end->size = size;
        wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

        /* queue the work to convert unwritten extents to written */
        queue_work(wq, &io_end->work);

        /* Add the io_end to the per-inode completed aio dio list */
        list_add_tail(&io_end->list,
                      &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
        iocb->private = NULL;
}

/*
 * For ext4 extent files, ext4 will do direct-io writes to holes,
 * preallocated extents, and writes that extend the file; there is no
 * need to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when DIO is
 * completed. For async direct IO, since the IO may still be pending
 * when we return, we set up an end_io callback function, which will do
 * the conversion when the async direct IO is completed.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list. So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 */
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                                  const struct iovec *iov, loff_t offset,
                                  unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
        size_t count = iov_length(iov, nr_segs);

        loff_t final_size = offset + count;
        if (rw == WRITE && final_size <= inode->i_size) {
                /*
                 * We could direct write to holes and fallocate.
                 *
                 * Allocated blocks to fill the hole are marked as
                 * uninitialized to prevent a parallel buffered read
                 * from exposing the stale data before DIO completes
                 * the data IO.
                 *
                 * As to previously fallocated extents, ext4 get_block
                 * will just simply mark the buffer mapped but still
                 * keep the extents uninitialized.
                 *
                 * For the non-AIO case, we will convert those unwritten
                 * extents to written after returning from
                 * blockdev_direct_IO.
                 *
                 * For async DIO, the conversion needs to be deferred
                 * until the IO is completed. The ext4 end_io callback
                 * function will be called to take care of the
                 * conversion work. Here, for the async case, we
                 * allocate an io_end structure to hook to the iocb.
                 */
                iocb->private = NULL;
                EXT4_I(inode)->cur_aio_dio = NULL;
                if (!is_sync_kiocb(iocb)) {
                        iocb->private = ext4_init_io_end(inode);
                        if (!iocb->private)
                                return -ENOMEM;
                        /*
                         * We save the io structure for the current async
                         * direct IO, so that later ext4_get_blocks()
                         * could flag the io structure if there is
                         * an unwritten extent that needs to be converted
                         * when the IO is completed.
                         */
                        EXT4_I(inode)->cur_aio_dio = iocb->private;
                }

                ret = blockdev_direct_IO(rw, iocb, inode,
                                         inode->i_sb->s_bdev, iov,
                                         offset, nr_segs,
                                         ext4_get_block_dio_write,
                                         ext4_end_io_dio);
                if (iocb->private)
                        EXT4_I(inode)->cur_aio_dio = NULL;
                /*
                 * The io_end structure takes a reference to the inode;
                 * that structure needs to be destroyed and the
                 * reference to the inode needs to be dropped when IO is
                 * complete, even with a 0 byte write, or a failed write.
                 *
                 * In the successful AIO DIO case, the io_end structure
                 * will be destroyed and the reference to the inode will
                 * be dropped after the end_io callback function is
                 * called.
                 *
                 * In the case of a 0 byte write, or an error, since the
                 * VFS direct IO won't invoke the end_io callback
                 * function, we need to free the end_io structure here.
                 */
                if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
                        ext4_free_io_end(iocb->private);
                        iocb->private = NULL;
                } else if (ret > 0 && (EXT4_I(inode)->i_state &
                                       EXT4_STATE_DIO_UNWRITTEN)) {
                        int err;
                        /*
                         * For the non-AIO case, since the IO is already
                         * completed, we can do the conversion right here.
                         */
                        err = ext4_convert_unwritten_extents(inode,
                                                             offset, ret);
                        if (err < 0)
                                ret = err;
                        EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
                }
                return ret;
        }

        /* for writes that extend the file, we fall back to the old way */
        return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}

static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;

        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
                return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);

        return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}

/*
 * Pages can be marked dirty completely asynchronously from ext4's journalling
 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
 * much here because ->set_page_dirty is called under VFS locks. The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
        SetPageChecked(page);
        return __set_page_dirty_nobuffers(page);
}
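
/*
 * The "pending dirty" mark set above is consumed in ext4_writepage():
 * a PageChecked() page on a data-journaled inode is routed to
 * __ext4_journalled_writepage(), which attaches the buffers to the
 * running transaction instead of writing them out directly.
 */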

static const struct address_space_operations ext4_ordered_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_ordered_write_end,
        .bmap                   = ext4_bmap,
        .invalidatepage         = ext4_invalidatepage,
        .releasepage            = ext4_releasepage,
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};

static const struct address_space_operations ext4_writeback_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_writeback_write_end,
        .bmap                   = ext4_bmap,
        .invalidatepage         = ext4_invalidatepage,
        .releasepage            = ext4_releasepage,
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};

static const struct address_space_operations ext4_journalled_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_journalled_write_end,
        .set_page_dirty         = ext4_journalled_set_page_dirty,
        .bmap                   = ext4_bmap,
        .invalidatepage         = ext4_invalidatepage,
        .releasepage            = ext4_releasepage,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};

static const struct address_space_operations ext4_da_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
        .writepages             = ext4_da_writepages,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_da_write_begin,
        .write_end              = ext4_da_write_end,
        .bmap                   = ext4_bmap,
        .invalidatepage         = ext4_da_invalidatepage,
        .releasepage            = ext4_releasepage,
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};
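
/*
 * Summary of the selection below (data journaling mode vs. the
 * delalloc mount option):
 *
 *	ordered   + delalloc	-> ext4_da_aops
 *	ordered   (no delalloc)	-> ext4_ordered_aops
 *	writeback + delalloc	-> ext4_da_aops
 *	writeback (no delalloc)	-> ext4_writeback_aops
 *	journal   (any)		-> ext4_journalled_aops
 */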
void ext4_set_aops(struct inode *inode)
{
        if (ext4_should_order_data(inode) &&
            test_opt(inode->i_sb, DELALLOC))
                inode->i_mapping->a_ops = &ext4_da_aops;
        else if (ext4_should_order_data(inode))
                inode->i_mapping->a_ops = &ext4_ordered_aops;
        else if (ext4_should_writeback_data(inode) &&
                 test_opt(inode->i_sb, DELALLOC))
                inode->i_mapping->a_ops = &ext4_da_aops;
        else if (ext4_should_writeback_data(inode))
                inode->i_mapping->a_ops = &ext4_writeback_aops;
        else
                inode->i_mapping->a_ops = &ext4_journalled_aops;
}

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate. We need to physically zero the tail
 * end of that block so it doesn't yield old data if the file is later
 * grown.
 */
int ext4_block_truncate_page(handle_t *handle,
                             struct address_space *mapping, loff_t from)
{
        ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, length, pos;
        ext4_lblk_t iblock;
        struct inode *inode = mapping->host;
        struct buffer_head *bh;
        struct page *page;
        int err = 0;

        page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
                                   mapping_gfp_mask(mapping) & ~__GFP_FS);
        if (!page)
                return -EINVAL;

        blocksize = inode->i_sb->s_blocksize;
        length = blocksize - (offset & (blocksize - 1));
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        /*
         * For the "nobh" option, we can only work if we don't need to
         * read-in the page - otherwise we create buffers to do the IO.
         */
        if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
            ext4_should_writeback_data(inode) && PageUptodate(page)) {
                zero_user(page, offset, length);
                set_page_dirty(page);
                goto unlock;
        }

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        /* Find the buffer that contains "offset" */
        bh = page_buffers(page);
        pos = blocksize;
        while (offset >= pos) {
                bh = bh->b_this_page;
                iblock++;
                pos += blocksize;
        }

        err = 0;
        if (buffer_freed(bh)) {
                BUFFER_TRACE(bh, "freed: skip");
                goto unlock;
        }

        if (!buffer_mapped(bh)) {
                BUFFER_TRACE(bh, "unmapped");
                ext4_get_block(inode, iblock, bh, 0);
                /* unmapped? It's a hole - nothing to do */
                if (!buffer_mapped(bh)) {
                        BUFFER_TRACE(bh, "still unmapped");
                        goto unlock;
                }
        }

        /* Ok, it's mapped. Make sure it's up-to-date */
        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                err = -EIO;
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);
                /* Uhhuh. Read error. Complain and punt. */
                if (!buffer_uptodate(bh))
                        goto unlock;
        }

        if (ext4_should_journal_data(inode)) {
                BUFFER_TRACE(bh, "get write access");
                err = ext4_journal_get_write_access(handle, bh);
                if (err)
                        goto unlock;
        }

        zero_user(page, offset, length);

        BUFFER_TRACE(bh, "zeroed end of block");

        err = 0;
        if (ext4_should_journal_data(inode)) {
                err = ext4_handle_dirty_metadata(handle, inode, bh);
        } else {
                if (ext4_should_order_data(inode))
                        err = ext4_jbd2_file_inode(handle, inode);
                mark_buffer_dirty(bh);
        }

unlock:
        unlock_page(page);
        page_cache_release(page);
        return err;
}

/*
 * Probably it should be a library function... search for the first non-zero
 * word or memcmp with zero_page, whatever is better for a particular
 * architecture. Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
        while (p < q)
                if (*p++)
                        return 0;
        return 1;
}

/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. A block is
 *	partially truncated if some data below the new i_size is referred
 *	to from it (and it is on the path to the first completely truncated
 *	data block, indeed). We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but the top of the branch
 *	may require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of the branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. The return value is the pointer to the last filled
 *	element of @chain.
 *
 *	The work left to the caller is the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
                                  ext4_lblk_t offsets[4], Indirect chain[4],
                                  __le32 *top)
{
        Indirect *partial, *p;
        int k, err;

        *top = 0;
        /* Make k index the deepest non-null offset + 1 */
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext4_get_branch(inode, k, offsets, chain, &err);
        /* Writer: pointers */
        if (!partial)
                partial = chain + k-1;
        /*
         * If the branch acquired continuation since we've looked at it -
         * fine, it should all survive and the (new) top doesn't belong
         * to us.
         */
        if (!partial->key && *partial->p)
                /* Writer: end */
                goto no_top;
        for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
                ;
        /*
         * OK, we've found the last block that must survive. The rest of
         * our branch should be detached before unlocking. However, if
         * that rest of the branch is all ours and does not grow
         * immediately from the inode it's easier to cheat and just
         * decrement partial->p.
         */
        if (p == chain + k - 1 && p > chain) {
                p->p--;
        } else {
                *top = *p->p;
                /* Nope, don't do this in ext4. Must leave the tree intact */
#if 0
                *p->p = 0;
#endif
        }
        /* Writer: end */

        while (partial > p) {
                brelse(partial->bh);
                partial--;
        }
no_top:
        return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
                              struct buffer_head *bh,
                              ext4_fsblk_t block_to_free,
                              unsigned long count, __le32 *first,
                              __le32 *last)
{
        __le32 *p;
        int flags = EXT4_FREE_BLOCKS_FORGET;

        if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
                flags |= EXT4_FREE_BLOCKS_METADATA;

        if (try_to_extend_transaction(handle, inode)) {
                if (bh) {
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                        ext4_handle_dirty_metadata(handle, inode, bh);
                }
                ext4_mark_inode_dirty(handle, inode);
                ext4_truncate_restart_trans(handle, inode,
                                            blocks_for_truncate(inode));
                if (bh) {
                        BUFFER_TRACE(bh, "retaking write access");
                        ext4_journal_get_write_access(handle, bh);
                }
        }

        for (p = first; p < last; p++)
                *p = 0;

        ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred to from that array (numbers are stored
 * as little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
                           struct buffer_head *this_bh,
                           __le32 *first, __le32 *last)
{
        ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
        unsigned long count = 0;           /* Number of blocks in the run */
        __le32 *block_to_free_p = NULL;    /* Pointer into inode/ind
                                              corresponding to
                                              block_to_free */
        ext4_fsblk_t nr;                   /* Current block # */
        __le32 *p;                         /* Pointer into inode/ind
                                              for current block */
        int err;

        if (this_bh) {                     /* For indirect block */
                BUFFER_TRACE(this_bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, this_bh);
                /* Important: if we can't update the indirect pointers
                 * to the blocks, we can't free them. */
                if (err)
                        return;
        }

        for (p = first; p < last; p++) {
                nr = le32_to_cpu(*p);
                if (nr) {
                        /* accumulate blocks to free if they're contiguous */
                        if (count == 0) {
                                block_to_free = nr;
                                block_to_free_p = p;
                                count = 1;
                        } else if (nr == block_to_free + count) {
                                count++;
                        } else {
                                ext4_clear_blocks(handle, inode, this_bh,
                                                  block_to_free,
                                                  count, block_to_free_p, p);
                                block_to_free = nr;
                                block_to_free_p = p;
                                count = 1;
                        }
                }
        }

        if (count > 0)
                ext4_clear_blocks(handle, inode, this_bh, block_to_free,
                                  count, block_to_free_p, p);

        if (this_bh) {
                BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

                /*
                 * The buffer head should have an attached journal head at
                 * this point. However, if the data is corrupted and an
                 * indirect block pointed to itself, it would have been
                 * detached when the block was cleared. Check for this
                 * instead of OOPSing.
                 */
                if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
                        ext4_handle_dirty_metadata(handle, inode, this_bh);
                else
                        ext4_error(inode->i_sb, __func__,
                                   "circular indirect block detected, "
                                   "inode=%lu, block=%llu",
                                   inode->i_ino,
                                   (unsigned long long) this_bh->b_blocknr);
        }
}
/**
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred to from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext4_error(inode->i_sb, "ext4_free_branches",
					   "Read failure, inode=%lu, block=%llu",
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block. Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate. But it's no longer
			 * needed and we now drop it from the transaction via
			 * jbd2_journal_revoke().
			 *
			 * That's easy if it's exclusively part of this
			 * transaction. But if it's part of the committing
			 * transaction then jbd2_journal_forget() will simply
			 * brelse() it. That means that if the underlying
			 * block is reallocated in ext4_get_block(),
			 * unmap_underlying_metadata() will find this block
			 * and will try to get rid of it. damn, damn.
			 *
			 * If this block has already been committed to the
			 * journal, a revoke record will be written. And
			 * revoke records must be emitted *before* clearing
			 * this block's bit in the bitmaps.
			 */
			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been
			 * released. Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it. So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    blocks_for_truncate(inode));
			}

			ext4_free_blocks(handle, inode, 0, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
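
/*
 * Illustrative sketch (not part of the original file): how @depth drives the
 * recursion above. For a double-indirect subtree, the caller passes depth ==
 * 2: the first level reads each indirect block and recurses with depth == 1,
 * which recurses again with depth == 0 and lands in the else-branch, where
 * ext4_free_data() frees the actual data blocks. The standalone toy below
 * (under this file's #if 0 convention) mirrors only that control flow, using
 * a single child per level for brevity where a real block has many slots.
 */
#if 0
#include <stdio.h>

static void free_branches_toy(const char *name, int depth)
{
	if (depth--) {
		printf("descend into %s (remaining depth %d)\n", name, depth);
		free_branches_toy("child", depth);
		/* bottom-up: the indirect block goes after its children */
		printf("free indirect block %s\n", name);
	} else {
		printf("free data blocks under %s\n", name);
	}
}

int main(void)
{
	free_branches_toy("double-indirect root", 2);
	return 0;
}
#endif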
int ext4_can_truncate(struct inode *inode)
{
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return 0;
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}
/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk. We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable. It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go. So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem. But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
void ext4_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	ext4_lblk_t last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	if (!ext4_can_truncate(inode))
		return;

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		ext4_ext_truncate(inode);
		return;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (inode->i_size & (blocksize - 1))
		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
			goto out_stop;

	n = ext4_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK. This truncate is going to happen. We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers. It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&ei->i_data_sem);

	ext4_discard_preallocations(inode);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop. No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}

	up_write(&ei->i_data_sem);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
}
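
/*
 * Illustrative sketch (not part of the original file): the offsets[4] array
 * filled in by ext4_block_to_path() above, reproduced as a simplified
 * standalone program. It assumes 4K blocks (addr_per_block = 1024) and the
 * twelve direct slots of EXT4_NDIR_BLOCKS; logical block 12 then maps to
 * {12, 0} (depth 2, via the indirect block) and block 1036 to {13, 0, 0}
 * (depth 3, via the double-indirect block). This sketches the mapping only;
 * the real helper also range-checks against the maximum file size.
 */
#if 0
#include <stdio.h>

#define NDIR 12			/* EXT4_NDIR_BLOCKS */
#define APB  1024		/* EXT4_ADDR_PER_BLOCK for 4K blocks */

static int block_to_path_toy(unsigned long blk, unsigned long offsets[4])
{
	int n = 0;

	if (blk < NDIR) {
		offsets[n++] = blk;			/* direct */
	} else if ((blk -= NDIR) < APB) {
		offsets[n++] = NDIR;			/* EXT4_IND_BLOCK */
		offsets[n++] = blk;
	} else if ((blk -= APB) < (unsigned long)APB * APB) {
		offsets[n++] = NDIR + 1;		/* EXT4_DIND_BLOCK */
		offsets[n++] = blk / APB;
		offsets[n++] = blk % APB;
	} else {
		blk -= (unsigned long)APB * APB;
		offsets[n++] = NDIR + 2;		/* EXT4_TIND_BLOCK */
		offsets[n++] = blk / ((unsigned long)APB * APB);
		offsets[n++] = (blk / APB) % APB;
		offsets[n++] = blk % APB;
	}
	return n;	/* depth, as used for chain[]/partial above */
}

int main(void)
{
	unsigned long offsets[4];
	int i, n = block_to_path_toy(1036, offsets);

	for (i = 0; i < n; i++)
		printf("offsets[%d] = %lu\n", i, offsets[i]);
	return 0;
}
#endif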
/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success. If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	ext4_fsblk_t block;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (!ext4_valid_inum(sb, inode->i_ino))
		return -EIO;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (!bh) {
		ext4_error(sb, "ext4_get_inode_loc", "unable to read "
			   "inode block - inode=%lu, block=%llu",
			   inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block. In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
			if (table > b)
				b = table;
			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext4_error(sb, __func__,
				   "unable to read inode block - inode=%lu, "
				   "block=%llu", inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}
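
/*
 * Illustrative sketch (not part of the original file): the inode-location
 * arithmetic above, as a standalone program. The geometry values are
 * assumptions picked for the example (4K blocks, 256-byte inodes, 8192
 * inodes per group, the group's inode table starting at block 1000); real
 * values come from the superblock and group descriptor. For inode #8200
 * this prints group 1, block 1000 + 7/16 = 1000 and offset 7 * 256 = 1792.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* assumed geometry, normally read from the on-disk superblock */
	unsigned long inodes_per_group = 8192;
	unsigned long block_size = 4096, inode_size = 256;
	unsigned long inode_table = 1000;	/* for the inode's group */
	unsigned long ino = 8200;

	unsigned long inodes_per_block = block_size / inode_size;
	unsigned long group = (ino - 1) / inodes_per_group;
	unsigned long inode_offset = (ino - 1) % inodes_per_group;
	unsigned long block = inode_table + inode_offset / inodes_per_block;
	unsigned long offset = (inode_offset % inodes_per_block) * inode_size;

	printf("group=%lu block=%lu offset=%lu\n", group, block, offset);
	return 0;
}
#endif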
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
				!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
}

void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT4_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int flags = ei->vfs_inode.i_flags;

	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
	if (flags & S_SYNC)
		ei->i_flags |= EXT4_SYNC_FL;
	if (flags & S_APPEND)
		ei->i_flags |= EXT4_APPEND_FL;
	if (flags & S_IMMUTABLE)
		ei->i_flags |= EXT4_IMMUTABLE_FL;
	if (flags & S_NOATIME)
		ei->i_flags |= EXT4_NOATIME_FL;
	if (flags & S_DIRSYNC)
		ei->i_flags |= EXT4_DIRSYNC_FL;
}
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
			/* i_blocks is stored in file system block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}
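
/*
 * Illustrative sketch (not part of the original file): the 48-bit i_blocks
 * reconstruction above, with made-up on-disk values. With i_blocks_high =
 * 0x0001 and i_blocks_lo = 0x00000002, the combined count is
 * (1 << 32) | 2 = 4294967298; if EXT4_HUGE_FILE_FL is also set and the block
 * size is 4K (i_blkbits = 12), that count is in filesystem blocks and is
 * converted to 512-byte units by shifting left by 12 - 9 = 3.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t i_blocks_high = 0x0001;	/* made-up example values */
	uint32_t i_blocks_lo = 0x00000002;
	unsigned int i_blkbits = 12;		/* 4K blocks */
	int huge_file = 1;			/* EXT4_HUGE_FILE_FL set */

	uint64_t i_blocks = ((uint64_t)i_blocks_high << 32) | i_blocks_lo;

	if (huge_file)		/* fs-block units -> 512-byte units */
		i_blocks <<= i_blkbits - 9;

	printf("i_blocks = %llu (512-byte units)\n",
	       (unsigned long long)i_blocks);
	return 0;
}
#endif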
struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = 0;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);

	ei->i_state = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync. We set them to currently running transaction
	 * as we cannot be sure that the inode or some of its metadata isn't
	 * part of the transaction - the inode could have been reclaimed and
	 * now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		spin_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		spin_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			__le32 *magic = (void *)raw_inode +
					EXT4_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
				ei->i_state |= EXT4_STATE_XATTR;
		}
	} else
		ei->i_extra_isize = 0;

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			inode->i_version |=
			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
		ext4_error(sb, __func__,
			   "bad extended attribute block %llu in inode #%lu",
			   ei->i_file_acl, inode->i_ino);
		ret = -EIO;
		goto bad_inode;
	} else if (ei->i_flags & EXT4_EXTENTS_FL) {
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		    (S_ISLNK(inode->i_mode) &&
		     !ext4_inode_is_fast_symlink(inode)))
			/* Validate extent which is part of inode */
			ret = ext4_ext_check_inode(inode);
	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		   (S_ISLNK(inode->i_mode) &&
		    !ext4_inode_is_fast_symlink(inode))) {
		/* Validate block references which are part of inode */
		ret = ext4_check_inode_blockref(inode);
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else {
		ret = -EIO;
		ext4_error(inode->i_sb, __func__,
			   "bogus i_mode (%o) for inode=%lu",
			   inode->i_mode, inode->i_ino);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}
static int ext4_inode_blocks_set(handle_t *handle,
				struct ext4_inode *raw_inode,
				struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
		return 0;
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
	} else {
		ei->i_flags |= EXT4_HUGE_FILE_FL;
		/* i_blocks is stored in file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}
/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache. This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT4_STATE_NEW)
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			sb->s_dirt = 1;
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_metadata(handle, inode,
					EXT4_SB(sb)->s_sbh);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
				cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, inode, bh);
	if (!err)
		err = rc;
	ei->i_state &= ~EXT4_STATE_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}
/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return. We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this. The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost. Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, int wait)
{
	int err;

	if (current->flags & PF_MEMALLOC)
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		if (!wait)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = ext4_get_inode_loc(inode, &iloc);
		if (err)
			return err;
		if (wait)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			ext4_error(inode->i_sb, __func__,
				   "IO error syncing inode, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long)iloc.bh->b_blocknr);
			err = -EIO;
		}
	}
	return err;
}
/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to ensure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we
 * must start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE)
		dquot_initialize(inode);
	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode,
				(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
				 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
				error = -EFBIG;
				goto err_out;
			}
		}
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext4_orphan_add(handle, inode);
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);

		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error) {
				/* Do as much error cleanup as possible */
				handle = ext4_journal_start(inode, 3);
				if (IS_ERR(handle)) {
					ext4_orphan_del(NULL, inode);
					goto err_out;
				}
				ext4_orphan_del(handle, inode);
				ext4_journal_stop(handle);
				goto err_out;
			}
		}
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext4_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}
int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long delalloc_blocks;

	inode = dentry->d_inode;
	generic_fillattr(inode, stat);

	/*
	 * We can't update i_blocks if the block allocation is delayed;
	 * otherwise, in the case of a system crash before the real block
	 * allocation is done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with real
	 * allocation. But to avoid confusing userspace, stat
	 * will return a block count that includes the delayed allocation
	 * blocks for this file.
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
	return 0;
}
static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
				      int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks,
		 * 2 dindirect blocks and
		 * 1 tindirect block
		 */
		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
		return indirects + 3;
	}
	/*
	 * If nrblocks are not contiguous, then in the worst case each block
	 * touches an indirect block, each indirect block touches a double
	 * indirect block, plus a triple indirect block
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}
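
/*
 * Illustrative arithmetic (not part of the original file), assuming 4K
 * blocks so that EXT4_ADDR_PER_BLOCK is 1024: a contiguous chunk of
 * nrblocks = 2048 costs 2048/1024 + 3 = 5 index-block credits, while the
 * same 2048 blocks scattered across the file are charged the worst case
 * 2048 * 2 + 1 = 4097 credits.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int addr_per_block = 1024;	/* assumes 4K blocks */
	int nrblocks = 2048;

	/* contiguous chunk: one indirect per 1024 blocks, plus dind/tind */
	printf("contiguous: %d credits\n", nrblocks / addr_per_block + 3);
	/* scattered: each block may touch its own indirect and dindirect,
	 * plus one tindirect */
	printf("scattered:  %d credits\n", nrblocks * 2 + 1);
	return 0;
}
#endif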
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.
 * In the worst case, the index blocks spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over
 * different block groups too. If they are contiguous, with flexbg,
 * they could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks
 */
int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to modify nrblocks?
	 * The "chunk" flag indicates whether nrblocks is
	 * physically contiguous on disk.
	 *
	 * Direct IO and fallocate call get_block to allocate
	 * one single extent at a time, so they can set the "chunk" flag.
	 */
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors
	 * need to be accounted.
	 */
	groups = idxblocks;
	if (chunk)
		groups += 1;
	else
		groups += nrblocks;

	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
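
/*
 * Illustrative arithmetic (not part of the original file), with assumed
 * geometry: 4K blocks, an indirect-mapped inode, one contiguous chunk
 * (chunk = 1) of nrblocks = 64, ngroups = 128 and s_gdb_count = 2.
 * idxblocks = 64/1024 + 3 = 3 and groups = 3 + 1 = 4; the descriptor term
 * is clamped to s_gdb_count = 2, giving 3 + 4 + 2 plus the fixed
 * EXT4_META_TRANS_BLOCKS(sb) term. A sketch of the same computation:
 */
#if 0
#include <stdio.h>

int main(void)
{
	int addr_per_block = 1024, nrblocks = 64, chunk = 1;
	int ngroups = 128, gdb_count = 2;	/* assumed fs geometry */
	int meta = 8;		/* stand-in for EXT4_META_TRANS_BLOCKS */

	int idxblocks = chunk ? nrblocks / addr_per_block + 3
			      : nrblocks * 2 + 1;
	int groups = idxblocks + (chunk ? 1 : nrblocks);
	int gdpblocks = groups;

	if (groups > ngroups)
		groups = ngroups;
	if (groups > gdb_count)
		gdpblocks = gdb_count;

	printf("credits = %d\n", idxblocks + groups + gdpblocks + meta);
	return 0;
}
#endif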
/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}
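
/*
 * Illustrative arithmetic (not part of the original file): with 4K pages
 * and 1K filesystem blocks, ext4_journal_blocks_per_page() yields bpp = 4,
 * so the reservation above covers the worst case of four separate
 * (discontiguous) one-block allocations for the page, plus four more data
 * block credits when running in data=journal mode.
 */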
/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever is calling
 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}
/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/* No extended attributes present */
	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}
/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O. This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating? Not really. Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective? Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O. But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out. One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory. It has the desired
 * effect.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	static unsigned int mnt_count;
	int err, ret;

	might_sleep();
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (ext4_handle_valid(handle) &&
	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
		/*
		 * We need extra buffer credits since we may write into EA block
		 * with this same handle. If journal_extend fails, then it will
		 * only result in a minor loss of functionality for that inode.
		 * If this is felt to be critical, then e2fsck should be run to
		 * force a large enough s_min_extra_isize.
		 */
		if ((jbd2_journal_extend(handle,
			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
			ret = ext4_expand_extra_isize(inode,
						      sbi->s_want_extra_isize,
						      iloc, handle);
			if (ret) {
				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
				if (mnt_count !=
					le16_to_cpu(sbi->s_es->s_mnt_count)) {
					ext4_warning(inode->i_sb, __func__,
					"Unable to expand inode %lu. Delete"
					" some EAs or run e2fsck.",
					inode->i_ino);
					mnt_count =
					  le16_to_cpu(sbi->s_es->s_mnt_count);
				}
			}
		}
	}
	if (!err)
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}
/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}
#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early. Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 inode,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous. If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	jbd2_journal_lock_updates(journal);
	jbd2_journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk. We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
	else
		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret = -EINVAL;
	void *fsdata;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
	 * get i_mutex because we are already holding mmap_sem.
	 */
	down_read(&inode->i_alloc_sem);
	size = i_size_read(inode);
	if (page->mapping != mapping || size <= page_offset(page)
	    || !PageUptodate(page)) {
		/* page got truncated from under us? */
		goto out_unlock;
	}
	ret = 0;
	if (PageMappedToDisk(page))
		goto out_unlock;

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	lock_page(page);
	/*
	 * Return if we have all the buffers mapped. This avoids
	 * the need to call write_begin/write_end, which does a
	 * journal_start/journal_stop and so can block for a
	 * long time.
	 */
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					ext4_bh_unmapped)) {
			unlock_page(page);
			goto out_unlock;
		}
	}
	unlock_page(page);
	/*
	 * OK, we need to fill the hole... Do write_begin/write_end
	 * to do the block allocation/reservation. We are not holding
	 * inode->i_mutex here, which allows parallel write_begin,
	 * write_end calls. lock_page prevents this from happening
	 * on the same page, though.
	 */
	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
			len, len, page, fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = 0;
out_unlock:
	if (ret)
		ret = VM_FAULT_SIGBUS;
	up_read(&inode->i_alloc_sem);
	return ret;
}