/*
 * linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/cleancache.h>
#include <asm/uaccess.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"       /* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static int ext4_mballoc_ready;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
                                   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
                                 const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   transaction start -> i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) -> mmap_sem
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) ->
 *   transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
        .owner = THIS_MODULE,
        .name = "ext2",
        .mount = ext4_mount,
        .kill_sb = kill_block_super,
        .fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif

static struct file_system_type ext3_fs_type = {
        .owner = THIS_MODULE,
        .name = "ext3",
        .mount = ext4_mount,
        .kill_sb = kill_block_super,
        .fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
                                 struct ext4_super_block *es)
{
        if (!ext4_has_feature_metadata_csum(sb))
                return 1;

        return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
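
/*
 * Superblock checksum helpers: the checksum covers the superblock up to,
 * but not including, the s_checksum field itself, and is only computed
 * and verified when the metadata_csum feature is enabled.
 */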
static __le32 ext4_superblock_csum(struct super_block *sb,
                                   struct ext4_super_block *es)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int offset = offsetof(struct ext4_super_block, s_checksum);
        __u32 csum;

        csum = ext4_chksum(sbi, ~0, (char *)es, offset);

        return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
                                       struct ext4_super_block *es)
{
        if (!ext4_has_metadata_csum(sb))
                return 1;

        return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (!ext4_has_metadata_csum(sb))
                return;

        es->s_checksum = ext4_superblock_csum(sb, es);
}
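
/*
 * Allocation helpers for buffers that may be too large for kmalloc():
 * try kmalloc() first (suppressing its failure warning) and fall back
 * to __vmalloc() if the physically contiguous allocation fails.
 */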
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
        void *ret;

        ret = kmalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags, PAGE_KERNEL);
        return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
        void *ret;

        ret = kzalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
        return ret;
}
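
/*
 * Block group descriptor accessors.  On filesystems with 64-bit group
 * descriptors (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) each field
 * is split into _lo and _hi halves; otherwise only the _lo half is used.
 */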
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_block_bitmap_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_inode_bitmap_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
                              struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_inode_table_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_free_blocks_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
                             struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_free_inodes_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
                           struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_used_dirs_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_itable_unused_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
                          struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
                                  struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
                          struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
                        struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
                            struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
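
/*
 * Record an error in the on-disk superblock: remember the first and the
 * most recent error location, bump the error count, and arm the timer
 * that re-reports outstanding errors once a day.
 */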
static void __save_error_info(struct super_block *sb, const char *func,
                              unsigned int line)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
        if (bdev_read_only(sb->s_bdev))
                return;
        es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
        es->s_last_error_time = cpu_to_le32(get_seconds());
        strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
        es->s_last_error_line = cpu_to_le32(line);
        if (!es->s_first_error_time) {
                es->s_first_error_time = es->s_last_error_time;
                strncpy(es->s_first_error_func, func,
                        sizeof(es->s_first_error_func));
                es->s_first_error_line = cpu_to_le32(line);
                es->s_first_error_ino = es->s_last_error_ino;
                es->s_first_error_block = es->s_last_error_block;
        }
        /*
         * Start the daily error reporting function if it hasn't been
         * started already
         */
        if (!es->s_error_count)
                mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
        le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
                            unsigned int line)
{
        __save_error_info(sb, func, line);
        ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
        struct inode *bd_inode = sb->s_bdev->bd_inode;
        struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

        return bdi->dev == NULL;
}
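
/*
 * Called by jbd2 when a transaction commits: run the callbacks queued on
 * the transaction's t_private_list.  s_md_lock is dropped around each
 * callback invocation, so callbacks may block.
 */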
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
        struct super_block *sb = journal->j_private;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int error = is_journal_aborted(journal);
        struct ext4_journal_cb_entry *jce;

        BUG_ON(txn->t_state == T_FINISHED);
        spin_lock(&sbi->s_md_lock);
        while (!list_empty(&txn->t_private_list)) {
                jce = list_entry(txn->t_private_list.next,
                                 struct ext4_journal_cb_entry, jce_list);
                list_del_init(&jce->jce_list);
                spin_unlock(&sbi->s_md_lock);
                jce->jce_func(sb, jce, error);
                spin_lock(&sbi->s_md_lock);
        }
        spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
        if (sb->s_flags & MS_RDONLY)
                return;

        if (!test_opt(sb, ERRORS_CONT)) {
                journal_t *journal = EXT4_SB(sb)->s_journal;

                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                if (journal)
                        jbd2_journal_abort(journal, -EIO);
        }
        if (test_opt(sb, ERRORS_RO)) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= MS_RDONLY;
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
                panic("EXT4-fs (device %s): panic forced after error\n",
                      sb->s_id);
        }
}

#define ext4_error_ratelimit(sb)                                        \
        ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),             \
                     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT
                       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
                       sb->s_id, function, line, current->comm, &vaf);
                va_end(args);
        }
        save_error_info(sb, function, line);
        ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
                        unsigned int line, ext4_fsblk_t block,
                        const char *fmt, ...)
{
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        es->s_last_error_block = cpu_to_le64(block);
        if (ext4_error_ratelimit(inode->i_sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                if (block)
                        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
                               "inode #%lu: block %llu: comm %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               block, current->comm, &vaf);
                else
                        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
                               "inode #%lu: comm %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               current->comm, &vaf);
                va_end(args);
        }
        save_error_info(inode->i_sb, function, line);
        ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
                       unsigned int line, ext4_fsblk_t block,
                       const char *fmt, ...)
{
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es;
        struct inode *inode = file_inode(file);
        char pathname[80], *path;

        es = EXT4_SB(inode->i_sb)->s_es;
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        if (ext4_error_ratelimit(inode->i_sb)) {
                path = file_path(file, pathname, sizeof(pathname));
                if (IS_ERR(path))
                        path = "(unknown)";
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                if (block)
                        printk(KERN_CRIT
                               "EXT4-fs error (device %s): %s:%d: inode #%lu: "
                               "block %llu: comm %s: path %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               block, current->comm, path, &vaf);
                else
                        printk(KERN_CRIT
                               "EXT4-fs error (device %s): %s:%d: inode #%lu: "
                               "comm %s: path %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               current->comm, path, &vaf);
                va_end(args);
        }
        save_error_info(inode->i_sb, function, line);
        ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
                              char nbuf[16])
{
        char *errstr = NULL;

        switch (errno) {
        case -EFSCORRUPTED:
                errstr = "Corrupt filesystem";
                break;
        case -EFSBADCRC:
                errstr = "Filesystem failed CRC";
                break;
        case -EIO:
                errstr = "IO failure";
                break;
        case -ENOMEM:
                errstr = "Out of memory";
                break;
        case -EROFS:
                if (!sb || (EXT4_SB(sb)->s_journal &&
                            EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
                        errstr = "Journal has aborted";
                else
                        errstr = "Readonly filesystem";
                break;
        default:
                /* If the caller passed in an extra buffer for unknown
                 * errors, textualise them now.  Else we just return
                 * NULL. */
                if (nbuf) {
                        /* Check for truncated error codes... */
                        if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
                                errstr = nbuf;
                }
                break;
        }

        return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */

void __ext4_std_error(struct super_block *sb, const char *function,
                      unsigned int line, int errno)
{
        char nbuf[16];
        const char *errstr;

        /* Special case: if the error is EROFS, and we're not already
         * inside a transaction, then there's really no point in logging
         * an error. */
        if (errno == -EROFS && journal_current_handle() == NULL &&
            (sb->s_flags & MS_RDONLY))
                return;

        if (ext4_error_ratelimit(sb)) {
                errstr = ext4_decode_error(sb, errno, nbuf);
                printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
                       sb->s_id, function, line, errstr);
        }

        save_error_info(sb, function, line);
        ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */
void __ext4_abort(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
{
        va_list args;

        save_error_info(sb, function, line);
        va_start(args, fmt);
        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
               function, line);
        vprintk(fmt, args);
        printk("\n");
        va_end(args);

        if ((sb->s_flags & MS_RDONLY) == 0) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= MS_RDONLY;
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
                panic("EXT4-fs panic from previous error\n");
        }
}

void __ext4_msg(struct super_block *sb,
                const char *prefix, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
        va_end(args);
}

#define ext4_warning_ratelimit(sb)                                      \
        ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),         \
                     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
                    unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
               sb->s_id, function, line, &vaf);
        va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
                          unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(inode->i_sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
               "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
               function, line, inode->i_ino, current->comm, &vaf);
        va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
                             struct super_block *sb, ext4_group_t grp,
                             unsigned long ino, ext4_fsblk_t block,
                             const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
        struct va_format vaf;
        va_list args;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        es->s_last_error_ino = cpu_to_le32(ino);
        es->s_last_error_block = cpu_to_le64(block);
        __save_error_info(sb, function, line);

        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
                       sb->s_id, function, line, grp);
                if (ino)
                        printk(KERN_CONT "inode %lu: ", ino);
                if (block)
                        printk(KERN_CONT "block %llu:",
                               (unsigned long long) block);
                printk(KERN_CONT "%pV\n", &vaf);
                va_end(args);
        }

        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
                return;
        }

        ext4_unlock_group(sb, grp);
        ext4_handle_error(sb);
        /*
         * We only get here in the ERRORS_RO case; relocking the group
         * may be dangerous, but nothing bad will happen since the
         * filesystem will have already been marked read/only and the
         * journal has been aborted.  We return 1 as a hint to callers
         * who might want to use the return value from
         * ext4_grp_locked_error() to distinguish between the
         * ERRORS_CONT and ERRORS_RO case, and perhaps return more
         * aggressively from the ext4 function in question, with a
         * more appropriate error code.
         */
        ext4_lock_group(sb, grp);
        return;
}
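
/*
 * Upgrade a GOOD_OLD_REV superblock to the dynamic revision so that
 * feature flags and the extended inode fields become meaningful; called
 * before setting a feature flag on an old-revision filesystem.
 */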
void ext4_update_dynamic_rev(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
                return;

        ext4_warning(sb,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT4_DYNAMIC_REV);

        es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
        es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
        es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
        /* leave es->s_feature_*compat flags alone */
        /* es->s_uuid will be set by e2fsck if empty */

        /*
         * The rest of the superblock fields should be zero, and if not it
         * means they are likely already in use, so leave them alone.  We
         * can leave it up to e2fsck to clean up any inconsistencies there.
         */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
        struct block_device *bdev;
        char b[BDEVNAME_SIZE];

        bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
        if (IS_ERR(bdev))
                goto fail;
        return bdev;

fail:
        ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
                 __bdevname(dev, b), PTR_ERR(bdev));
        return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
        struct block_device *bdev;

        bdev = sbi->journal_bdev;
        if (bdev) {
                ext4_blkdev_put(bdev);
                sbi->journal_bdev = NULL;
        }
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
        return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
        struct list_head *l;

        ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
                 le32_to_cpu(sbi->s_es->s_last_orphan));

        printk(KERN_ERR "sb_info orphan list:\n");
        list_for_each(l, &sbi->s_orphan) {
                struct inode *inode = orphan_list_entry(l);
                printk(KERN_ERR "  "
                       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
                       inode->i_sb->s_id, inode->i_ino, inode,
                       inode->i_mode, inode->i_nlink,
                       NEXT_ORPHAN(inode));
        }
}
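
/*
 * Tear down the in-memory state of a filesystem at unmount: stop the lazy
 * init thread and timers, destroy the journal, write a final copy of the
 * superblock on read-write mounts, and free the per-filesystem structures.
 */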
static void ext4_put_super(struct super_block *sb)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        int i, err;

        ext4_unregister_li_request(sb);
        dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

        flush_workqueue(sbi->rsv_conversion_wq);
        destroy_workqueue(sbi->rsv_conversion_wq);

        if (sbi->s_journal) {
                err = jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
                if (err < 0)
                        ext4_abort(sb, "Couldn't clean up the journal");
        }

        ext4_unregister_sysfs(sb);
        ext4_es_unregister_shrinker(sbi);
        del_timer_sync(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
        ext4_ext_release(sb);
        ext4_xattr_put_super(sb);

        if (!(sb->s_flags & MS_RDONLY)) {
                ext4_clear_feature_journal_needs_recovery(sb);
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
        if (!(sb->s_flags & MS_RDONLY))
                ext4_commit_super(sb, 1);

        for (i = 0; i < sbi->s_gdb_count; i++)
                brelse(sbi->s_group_desc[i]);
        kvfree(sbi->s_group_desc);
        kvfree(sbi->s_flex_groups);
        percpu_counter_destroy(&sbi->s_freeclusters_counter);
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
        brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                kfree(sbi->s_qf_names[i]);
#endif

        /* Debugging code just in case the in-memory inode orphan list
         * isn't empty.  The on-disk one can be non-empty if we've
         * detected an error and taken the fs readonly, but the
         * in-memory list had better be clean by this point. */
        if (!list_empty(&sbi->s_orphan))
                dump_orphan_list(sb, sbi);
        J_ASSERT(list_empty(&sbi->s_orphan));

        sync_blockdev(sb->s_bdev);
        invalidate_bdev(sb->s_bdev);
        if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
                /*
                 * Invalidate the journal device's buffers.  We don't want them
                 * floating about in memory - the physical journal device may
                 * be hotswapped, and it breaks the `ro-after' testing code.
                 */
                sync_blockdev(sbi->journal_bdev);
                invalidate_bdev(sbi->journal_bdev);
                ext4_blkdev_remove(sbi);
        }
        if (sbi->s_mb_cache) {
                ext4_xattr_destroy_cache(sbi->s_mb_cache);
                sbi->s_mb_cache = NULL;
        }
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
        sb->s_fs_info = NULL;
        /*
         * Now that we are completely done shutting down the
         * superblock, we need to actually destroy the kobject.
         */
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi->s_blockgroup_lock);
        kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
        struct ext4_inode_info *ei;

        ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;

        ei->vfs_inode.i_version = 1;
        spin_lock_init(&ei->i_raw_lock);
        INIT_LIST_HEAD(&ei->i_prealloc_list);
        spin_lock_init(&ei->i_prealloc_lock);
        ext4_es_init_tree(&ei->i_es_tree);
        rwlock_init(&ei->i_es_lock);
        INIT_LIST_HEAD(&ei->i_es_list);
        ei->i_es_all_nr = 0;
        ei->i_es_shk_nr = 0;
        ei->i_es_shrink_lblk = 0;
        ei->i_reserved_data_blocks = 0;
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
        ei->i_da_metadata_calc_len = 0;
        ei->i_da_metadata_calc_last_lblock = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
        memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
        ei->jinode = NULL;
        INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
        spin_lock_init(&ei->i_completed_io_lock);
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_ioend_count, 0);
        atomic_set(&ei->i_unwritten, 0);
        INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
        ei->i_crypt_info = NULL;
#endif
        return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
        int drop = generic_drop_inode(inode);

        trace_ext4_drop_inode(inode, drop);
        return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
        if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
                ext4_msg(inode->i_sb, KERN_ERR,
                         "Inode %lu (%p): orphan list check failed!",
                         inode->i_ino, EXT4_I(inode));
                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
                               EXT4_I(inode), sizeof(struct ext4_inode_info),
                               true);
                dump_stack();
        }
        call_rcu(&inode->i_rcu, ext4_i_callback);
}
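
/*
 * Slab constructor for ext4_inode_info: runs when the slab allocator first
 * sets up an object, not on every allocation, so only the fields that must
 * remain valid across reuse (the orphan list head and the rwsems) are
 * initialized here; the rest is reset in ext4_alloc_inode().
 */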
static void init_once(void *foo)
{
        struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

        INIT_LIST_HEAD(&ei->i_orphan);
        init_rwsem(&ei->xattr_sem);
        init_rwsem(&ei->i_data_sem);
        init_rwsem(&ei->i_mmap_sem);
        inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
        ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
                                              sizeof(struct ext4_inode_info),
                                              0, (SLAB_RECLAIM_ACCOUNT|
                                                  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                                              init_once);
        if (ext4_inode_cachep == NULL)
                return -ENOMEM;
        return 0;
}

static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(ext4_inode_cachep);
}
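
/*
 * Called from the VFS eviction path: drop cached buffers, quota references,
 * preallocations and the extent status tree, and release the jbd2 inode
 * before the in-core inode is freed.
 */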
void ext4_clear_inode(struct inode *inode)
{
        invalidate_inode_buffers(inode);
        clear_inode(inode);
        dquot_drop(inode);
        ext4_discard_preallocations(inode);
        ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
        if (EXT4_I(inode)->jinode) {
                jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
                                               EXT4_I(inode)->jinode);
                jbd2_free_inode(EXT4_I(inode)->jinode);
                EXT4_I(inode)->jinode = NULL;
        }
#ifdef CONFIG_EXT4_FS_ENCRYPTION
        if (EXT4_I(inode)->i_crypt_info)
                ext4_free_encryption_info(inode, EXT4_I(inode)->i_crypt_info);
#endif
}
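
/*
 * NFS export support: map the (inode number, generation) pair from a file
 * handle back to an inode, rejecting handles that fall outside the valid
 * inode range or whose generation no longer matches.
 */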
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
                                        u64 ino, u32 generation)
{
        struct inode *inode;

        if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
                return ERR_PTR(-ESTALE);
        if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
                return ERR_PTR(-ESTALE);

        /* iget isn't really right if the inode is currently unallocated!!
         *
         * ext4_read_inode will return a bad_inode if the inode had been
         * deleted, so we should be safe.
         *
         * Currently we don't know the generation for parent directory, so
         * a generation of 0 means "accept any"
         */
        inode = ext4_iget_normal(sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (generation && inode->i_generation != generation) {
                iput(inode);
                return ERR_PTR(-ESTALE);
        }

        return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
                                 gfp_t wait)
{
        journal_t *journal = EXT4_SB(sb)->s_journal;

        WARN_ON(PageChecked(page));
        if (!page_has_buffers(page))
                return 0;
        if (journal)
                return jbd2_journal_try_to_free_buffers(journal, page,
                                                wait & ~__GFP_DIRECT_RECLAIM);
        return try_to_free_buffers(page);
}
  970. #ifdef CONFIG_QUOTA
  971. static char *quotatypes[] = INITQFNAMES;
  972. #define QTYPE2NAME(t) (quotatypes[t])
  973. static int ext4_write_dquot(struct dquot *dquot);
  974. static int ext4_acquire_dquot(struct dquot *dquot);
  975. static int ext4_release_dquot(struct dquot *dquot);
  976. static int ext4_mark_dquot_dirty(struct dquot *dquot);
  977. static int ext4_write_info(struct super_block *sb, int type);
  978. static int ext4_quota_on(struct super_block *sb, int type, int format_id,
  979. struct path *path);
  980. static int ext4_quota_off(struct super_block *sb, int type);
  981. static int ext4_quota_on_mount(struct super_block *sb, int type);
  982. static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
  983. size_t len, loff_t off);
  984. static ssize_t ext4_quota_write(struct super_block *sb, int type,
  985. const char *data, size_t len, loff_t off);
  986. static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
  987. unsigned int flags);
  988. static int ext4_enable_quotas(struct super_block *sb);
  989. static struct dquot **ext4_get_dquots(struct inode *inode)
  990. {
  991. return EXT4_I(inode)->i_dquot;
  992. }
  993. static const struct dquot_operations ext4_quota_operations = {
  994. .get_reserved_space = ext4_get_reserved_space,
  995. .write_dquot = ext4_write_dquot,
  996. .acquire_dquot = ext4_acquire_dquot,
  997. .release_dquot = ext4_release_dquot,
  998. .mark_dirty = ext4_mark_dquot_dirty,
  999. .write_info = ext4_write_info,
  1000. .alloc_dquot = dquot_alloc,
  1001. .destroy_dquot = dquot_destroy,
  1002. .get_projid = ext4_get_projid,
  1003. };
  1004. static const struct quotactl_ops ext4_qctl_operations = {
  1005. .quota_on = ext4_quota_on,
  1006. .quota_off = ext4_quota_off,
  1007. .quota_sync = dquot_quota_sync,
  1008. .get_state = dquot_get_state,
  1009. .set_info = dquot_set_dqinfo,
  1010. .get_dqblk = dquot_get_dqblk,
  1011. .set_dqblk = dquot_set_dqblk
  1012. };
  1013. #endif
  1014. static const struct super_operations ext4_sops = {
  1015. .alloc_inode = ext4_alloc_inode,
  1016. .destroy_inode = ext4_destroy_inode,
  1017. .write_inode = ext4_write_inode,
  1018. .dirty_inode = ext4_dirty_inode,
  1019. .drop_inode = ext4_drop_inode,
  1020. .evict_inode = ext4_evict_inode,
  1021. .put_super = ext4_put_super,
  1022. .sync_fs = ext4_sync_fs,
  1023. .freeze_fs = ext4_freeze,
  1024. .unfreeze_fs = ext4_unfreeze,
  1025. .statfs = ext4_statfs,
  1026. .remount_fs = ext4_remount,
  1027. .show_options = ext4_show_options,
  1028. #ifdef CONFIG_QUOTA
  1029. .quota_read = ext4_quota_read,
  1030. .quota_write = ext4_quota_write,
  1031. .get_dquots = ext4_get_dquots,
  1032. #endif
  1033. .bdev_try_to_free_page = bdev_try_to_free_page,
  1034. };
  1035. static const struct export_operations ext4_export_ops = {
  1036. .fh_to_dentry = ext4_fh_to_dentry,
  1037. .fh_to_parent = ext4_fh_to_parent,
  1038. .get_parent = ext4_get_parent,
  1039. };
  1040. enum {
  1041. Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
  1042. Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
  1043. Opt_nouid32, Opt_debug, Opt_removed,
  1044. Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
  1045. Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
  1046. Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
  1047. Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
  1048. Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
  1049. Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
  1050. Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
  1051. Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
  1052. Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
  1053. Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax,
  1054. Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
  1055. Opt_lazytime, Opt_nolazytime,
  1056. Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
  1057. Opt_inode_readahead_blks, Opt_journal_ioprio,
  1058. Opt_dioread_nolock, Opt_dioread_lock,
  1059. Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
  1060. Opt_max_dir_size_kb, Opt_nojournal_checksum,
  1061. };
  1062. static const match_table_t tokens = {
  1063. {Opt_bsd_df, "bsddf"},
  1064. {Opt_minix_df, "minixdf"},
  1065. {Opt_grpid, "grpid"},
  1066. {Opt_grpid, "bsdgroups"},
  1067. {Opt_nogrpid, "nogrpid"},
  1068. {Opt_nogrpid, "sysvgroups"},
  1069. {Opt_resgid, "resgid=%u"},
  1070. {Opt_resuid, "resuid=%u"},
  1071. {Opt_sb, "sb=%u"},
  1072. {Opt_err_cont, "errors=continue"},
  1073. {Opt_err_panic, "errors=panic"},
  1074. {Opt_err_ro, "errors=remount-ro"},
  1075. {Opt_nouid32, "nouid32"},
  1076. {Opt_debug, "debug"},
  1077. {Opt_removed, "oldalloc"},
  1078. {Opt_removed, "orlov"},
  1079. {Opt_user_xattr, "user_xattr"},
  1080. {Opt_nouser_xattr, "nouser_xattr"},
  1081. {Opt_acl, "acl"},
  1082. {Opt_noacl, "noacl"},
  1083. {Opt_noload, "norecovery"},
  1084. {Opt_noload, "noload"},
  1085. {Opt_removed, "nobh"},
  1086. {Opt_removed, "bh"},
  1087. {Opt_commit, "commit=%u"},
  1088. {Opt_min_batch_time, "min_batch_time=%u"},
  1089. {Opt_max_batch_time, "max_batch_time=%u"},
  1090. {Opt_journal_dev, "journal_dev=%u"},
  1091. {Opt_journal_path, "journal_path=%s"},
  1092. {Opt_journal_checksum, "journal_checksum"},
  1093. {Opt_nojournal_checksum, "nojournal_checksum"},
  1094. {Opt_journal_async_commit, "journal_async_commit"},
  1095. {Opt_abort, "abort"},
  1096. {Opt_data_journal, "data=journal"},
  1097. {Opt_data_ordered, "data=ordered"},
  1098. {Opt_data_writeback, "data=writeback"},
  1099. {Opt_data_err_abort, "data_err=abort"},
  1100. {Opt_data_err_ignore, "data_err=ignore"},
  1101. {Opt_offusrjquota, "usrjquota="},
  1102. {Opt_usrjquota, "usrjquota=%s"},
  1103. {Opt_offgrpjquota, "grpjquota="},
  1104. {Opt_grpjquota, "grpjquota=%s"},
  1105. {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
  1106. {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
  1107. {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
  1108. {Opt_grpquota, "grpquota"},
  1109. {Opt_noquota, "noquota"},
  1110. {Opt_quota, "quota"},
  1111. {Opt_usrquota, "usrquota"},
  1112. {Opt_barrier, "barrier=%u"},
  1113. {Opt_barrier, "barrier"},
  1114. {Opt_nobarrier, "nobarrier"},
  1115. {Opt_i_version, "i_version"},
  1116. {Opt_dax, "dax"},
  1117. {Opt_stripe, "stripe=%u"},
  1118. {Opt_delalloc, "delalloc"},
  1119. {Opt_lazytime, "lazytime"},
  1120. {Opt_nolazytime, "nolazytime"},
  1121. {Opt_nodelalloc, "nodelalloc"},
  1122. {Opt_removed, "mblk_io_submit"},
  1123. {Opt_removed, "nomblk_io_submit"},
  1124. {Opt_block_validity, "block_validity"},
  1125. {Opt_noblock_validity, "noblock_validity"},
  1126. {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
  1127. {Opt_journal_ioprio, "journal_ioprio=%u"},
  1128. {Opt_auto_da_alloc, "auto_da_alloc=%u"},
  1129. {Opt_auto_da_alloc, "auto_da_alloc"},
  1130. {Opt_noauto_da_alloc, "noauto_da_alloc"},
  1131. {Opt_dioread_nolock, "dioread_nolock"},
  1132. {Opt_dioread_lock, "dioread_lock"},
  1133. {Opt_discard, "discard"},
  1134. {Opt_nodiscard, "nodiscard"},
  1135. {Opt_init_itable, "init_itable=%u"},
  1136. {Opt_init_itable, "init_itable"},
  1137. {Opt_noinit_itable, "noinit_itable"},
  1138. {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
  1139. {Opt_test_dummy_encryption, "test_dummy_encryption"},
  1140. {Opt_removed, "check=none"}, /* mount option from ext2/3 */
  1141. {Opt_removed, "nocheck"}, /* mount option from ext2/3 */
  1142. {Opt_removed, "reservation"}, /* mount option from ext2/3 */
  1143. {Opt_removed, "noreservation"}, /* mount option from ext2/3 */
  1144. {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */
  1145. {Opt_err, NULL},
  1146. };
  1147. static ext4_fsblk_t get_sb_block(void **data)
  1148. {
  1149. ext4_fsblk_t sb_block;
  1150. char *options = (char *) *data;
  1151. if (!options || strncmp(options, "sb=", 3) != 0)
  1152. return 1; /* Default location */
  1153. options += 3;
  1154. /* TODO: use simple_strtoll with >32bit ext4 */
  1155. sb_block = simple_strtoul(options, &options, 0);
  1156. if (*options && *options != ',') {
  1157. printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
  1158. (char *) *data);
  1159. return 1;
  1160. }
  1161. if (*options == ',')
  1162. options++;
  1163. *data = (void *) options;
  1164. return sb_block;
  1165. }
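/*
 * Editorial note (not part of the original source): get_sb_block() lets a
 * damaged primary superblock be bypassed from userspace. On a filesystem
 * with 1 KiB blocks the first backup superblock normally sits at block
 * 8193 (block 32768 for 4 KiB blocks), so a recovery mount could look like:
 *
 *   mount -t ext4 -o sb=8193,ro /dev/sdb1 /mnt
 *
 * The function consumes the leading "sb=<n>," and advances *data past it,
 * so the rest of the option string can still be parsed normally.
 */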
  1166. #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
  1167. static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
  1168. "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
  1169. #ifdef CONFIG_QUOTA
  1170. static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
  1171. {
  1172. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1173. char *qname;
  1174. int ret = -1;
  1175. if (sb_any_quota_loaded(sb) &&
  1176. !sbi->s_qf_names[qtype]) {
  1177. ext4_msg(sb, KERN_ERR,
  1178. "Cannot change journaled "
  1179. "quota options when quota turned on");
  1180. return -1;
  1181. }
  1182. if (ext4_has_feature_quota(sb)) {
  1183. ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
  1184. "when QUOTA feature is enabled");
  1185. return -1;
  1186. }
  1187. qname = match_strdup(args);
  1188. if (!qname) {
  1189. ext4_msg(sb, KERN_ERR,
  1190. "Not enough memory for storing quotafile name");
  1191. return -1;
  1192. }
  1193. if (sbi->s_qf_names[qtype]) {
  1194. if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
  1195. ret = 1;
  1196. else
  1197. ext4_msg(sb, KERN_ERR,
  1198. "%s quota file already specified",
  1199. QTYPE2NAME(qtype));
  1200. goto errout;
  1201. }
  1202. if (strchr(qname, '/')) {
  1203. ext4_msg(sb, KERN_ERR,
  1204. "quotafile must be on filesystem root");
  1205. goto errout;
  1206. }
  1207. sbi->s_qf_names[qtype] = qname;
  1208. set_opt(sb, QUOTA);
  1209. return 1;
  1210. errout:
  1211. kfree(qname);
  1212. return ret;
  1213. }
  1214. static int clear_qf_name(struct super_block *sb, int qtype)
  1215. {
  1216. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1217. if (sb_any_quota_loaded(sb) &&
  1218. sbi->s_qf_names[qtype]) {
  1219. ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
  1220. " when quota turned on");
  1221. return -1;
  1222. }
  1223. kfree(sbi->s_qf_names[qtype]);
  1224. sbi->s_qf_names[qtype] = NULL;
  1225. return 1;
  1226. }
  1227. #endif
  1228. #define MOPT_SET 0x0001
  1229. #define MOPT_CLEAR 0x0002
  1230. #define MOPT_NOSUPPORT 0x0004
  1231. #define MOPT_EXPLICIT 0x0008
  1232. #define MOPT_CLEAR_ERR 0x0010
  1233. #define MOPT_GTE0 0x0020
  1234. #ifdef CONFIG_QUOTA
  1235. #define MOPT_Q 0
  1236. #define MOPT_QFMT 0x0040
  1237. #else
  1238. #define MOPT_Q MOPT_NOSUPPORT
  1239. #define MOPT_QFMT MOPT_NOSUPPORT
  1240. #endif
  1241. #define MOPT_DATAJ 0x0080
  1242. #define MOPT_NO_EXT2 0x0100
  1243. #define MOPT_NO_EXT3 0x0200
  1244. #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
  1245. #define MOPT_STRING 0x0400
  1246. static const struct mount_opts {
  1247. int token;
  1248. int mount_opt;
  1249. int flags;
  1250. } ext4_mount_opts[] = {
  1251. {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
  1252. {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
  1253. {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
  1254. {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
  1255. {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
  1256. {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
  1257. {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
  1258. MOPT_EXT4_ONLY | MOPT_SET},
  1259. {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
  1260. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1261. {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
  1262. {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
  1263. {Opt_delalloc, EXT4_MOUNT_DELALLOC,
  1264. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1265. {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
  1266. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1267. {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
  1268. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1269. {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
  1270. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1271. {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
  1272. EXT4_MOUNT_JOURNAL_CHECKSUM),
  1273. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1274. {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
  1275. {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
  1276. {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
  1277. {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
  1278. {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
  1279. MOPT_NO_EXT2 | MOPT_SET},
  1280. {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
  1281. MOPT_NO_EXT2 | MOPT_CLEAR},
  1282. {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
  1283. {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
  1284. {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
  1285. {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
  1286. {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
  1287. {Opt_commit, 0, MOPT_GTE0},
  1288. {Opt_max_batch_time, 0, MOPT_GTE0},
  1289. {Opt_min_batch_time, 0, MOPT_GTE0},
  1290. {Opt_inode_readahead_blks, 0, MOPT_GTE0},
  1291. {Opt_init_itable, 0, MOPT_GTE0},
  1292. {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
  1293. {Opt_stripe, 0, MOPT_GTE0},
  1294. {Opt_resuid, 0, MOPT_GTE0},
  1295. {Opt_resgid, 0, MOPT_GTE0},
  1296. {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
  1297. {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
  1298. {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
  1299. {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
  1300. {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
  1301. {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
  1302. MOPT_NO_EXT2 | MOPT_DATAJ},
  1303. {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
  1304. {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
  1305. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  1306. {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
  1307. {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
  1308. #else
  1309. {Opt_acl, 0, MOPT_NOSUPPORT},
  1310. {Opt_noacl, 0, MOPT_NOSUPPORT},
  1311. #endif
  1312. {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
  1313. {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
  1314. {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
  1315. {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
  1316. MOPT_SET | MOPT_Q},
  1317. {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
  1318. MOPT_SET | MOPT_Q},
  1319. {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
  1320. EXT4_MOUNT_GRPQUOTA), MOPT_CLEAR | MOPT_Q},
  1321. {Opt_usrjquota, 0, MOPT_Q},
  1322. {Opt_grpjquota, 0, MOPT_Q},
  1323. {Opt_offusrjquota, 0, MOPT_Q},
  1324. {Opt_offgrpjquota, 0, MOPT_Q},
  1325. {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
  1326. {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
  1327. {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
  1328. {Opt_max_dir_size_kb, 0, MOPT_GTE0},
  1329. {Opt_test_dummy_encryption, 0, MOPT_GTE0},
  1330. {Opt_err, 0, 0}
  1331. };
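/*
 * Editorial note (not part of the original source): each ext4_mount_opts
 * entry maps a token from the match table above to a bit (or set of bits)
 * in sbi->s_mount_opt plus handling flags. For a simple boolean pair such
 * as {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET} and
 * {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR}, the generic tail of
 * handle_mount_opt() below just sets or clears EXT4_MOUNT_DISCARD; options
 * that carry a numeric argument (MOPT_GTE0) or a string (MOPT_STRING) are
 * handled by the dedicated branches earlier in that function.
 */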
  1332. static int handle_mount_opt(struct super_block *sb, char *opt, int token,
  1333. substring_t *args, unsigned long *journal_devnum,
  1334. unsigned int *journal_ioprio, int is_remount)
  1335. {
  1336. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1337. const struct mount_opts *m;
  1338. kuid_t uid;
  1339. kgid_t gid;
  1340. int arg = 0;
  1341. #ifdef CONFIG_QUOTA
  1342. if (token == Opt_usrjquota)
  1343. return set_qf_name(sb, USRQUOTA, &args[0]);
  1344. else if (token == Opt_grpjquota)
  1345. return set_qf_name(sb, GRPQUOTA, &args[0]);
  1346. else if (token == Opt_offusrjquota)
  1347. return clear_qf_name(sb, USRQUOTA);
  1348. else if (token == Opt_offgrpjquota)
  1349. return clear_qf_name(sb, GRPQUOTA);
  1350. #endif
  1351. switch (token) {
  1352. case Opt_noacl:
  1353. case Opt_nouser_xattr:
  1354. ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
  1355. break;
  1356. case Opt_sb:
  1357. return 1; /* handled by get_sb_block() */
  1358. case Opt_removed:
  1359. ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
  1360. return 1;
  1361. case Opt_abort:
  1362. sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
  1363. return 1;
  1364. case Opt_i_version:
  1365. sb->s_flags |= MS_I_VERSION;
  1366. return 1;
  1367. case Opt_lazytime:
  1368. sb->s_flags |= MS_LAZYTIME;
  1369. return 1;
  1370. case Opt_nolazytime:
  1371. sb->s_flags &= ~MS_LAZYTIME;
  1372. return 1;
  1373. }
  1374. for (m = ext4_mount_opts; m->token != Opt_err; m++)
  1375. if (token == m->token)
  1376. break;
  1377. if (m->token == Opt_err) {
  1378. ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
  1379. "or missing value", opt);
  1380. return -1;
  1381. }
  1382. if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
  1383. ext4_msg(sb, KERN_ERR,
  1384. "Mount option \"%s\" incompatible with ext2", opt);
  1385. return -1;
  1386. }
  1387. if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
  1388. ext4_msg(sb, KERN_ERR,
  1389. "Mount option \"%s\" incompatible with ext3", opt);
  1390. return -1;
  1391. }
  1392. if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
  1393. return -1;
  1394. if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
  1395. return -1;
  1396. if (m->flags & MOPT_EXPLICIT) {
  1397. if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
  1398. set_opt2(sb, EXPLICIT_DELALLOC);
  1399. } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
  1400. set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
  1401. } else
  1402. return -1;
  1403. }
  1404. if (m->flags & MOPT_CLEAR_ERR)
  1405. clear_opt(sb, ERRORS_MASK);
  1406. if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
  1407. ext4_msg(sb, KERN_ERR, "Cannot change quota "
  1408. "options when quota turned on");
  1409. return -1;
  1410. }
  1411. if (m->flags & MOPT_NOSUPPORT) {
  1412. ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
  1413. } else if (token == Opt_commit) {
  1414. if (arg == 0)
  1415. arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
  1416. sbi->s_commit_interval = HZ * arg;
  1417. } else if (token == Opt_max_batch_time) {
  1418. sbi->s_max_batch_time = arg;
  1419. } else if (token == Opt_min_batch_time) {
  1420. sbi->s_min_batch_time = arg;
  1421. } else if (token == Opt_inode_readahead_blks) {
  1422. if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
  1423. ext4_msg(sb, KERN_ERR,
  1424. "EXT4-fs: inode_readahead_blks must be "
  1425. "0 or a power of 2 smaller than 2^31");
  1426. return -1;
  1427. }
  1428. sbi->s_inode_readahead_blks = arg;
  1429. } else if (token == Opt_init_itable) {
  1430. set_opt(sb, INIT_INODE_TABLE);
  1431. if (!args->from)
  1432. arg = EXT4_DEF_LI_WAIT_MULT;
  1433. sbi->s_li_wait_mult = arg;
  1434. } else if (token == Opt_max_dir_size_kb) {
  1435. sbi->s_max_dir_size_kb = arg;
  1436. } else if (token == Opt_stripe) {
  1437. sbi->s_stripe = arg;
  1438. } else if (token == Opt_resuid) {
  1439. uid = make_kuid(current_user_ns(), arg);
  1440. if (!uid_valid(uid)) {
  1441. ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
  1442. return -1;
  1443. }
  1444. sbi->s_resuid = uid;
  1445. } else if (token == Opt_resgid) {
  1446. gid = make_kgid(current_user_ns(), arg);
  1447. if (!gid_valid(gid)) {
  1448. ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
  1449. return -1;
  1450. }
  1451. sbi->s_resgid = gid;
  1452. } else if (token == Opt_journal_dev) {
  1453. if (is_remount) {
  1454. ext4_msg(sb, KERN_ERR,
  1455. "Cannot specify journal on remount");
  1456. return -1;
  1457. }
  1458. *journal_devnum = arg;
  1459. } else if (token == Opt_journal_path) {
  1460. char *journal_path;
  1461. struct inode *journal_inode;
  1462. struct path path;
  1463. int error;
  1464. if (is_remount) {
  1465. ext4_msg(sb, KERN_ERR,
  1466. "Cannot specify journal on remount");
  1467. return -1;
  1468. }
  1469. journal_path = match_strdup(&args[0]);
  1470. if (!journal_path) {
  1471. ext4_msg(sb, KERN_ERR, "error: could not dup "
  1472. "journal device string");
  1473. return -1;
  1474. }
  1475. error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
  1476. if (error) {
  1477. ext4_msg(sb, KERN_ERR, "error: could not find "
  1478. "journal device path: error %d", error);
  1479. kfree(journal_path);
  1480. return -1;
  1481. }
  1482. journal_inode = d_inode(path.dentry);
  1483. if (!S_ISBLK(journal_inode->i_mode)) {
  1484. ext4_msg(sb, KERN_ERR, "error: journal path %s "
  1485. "is not a block device", journal_path);
  1486. path_put(&path);
  1487. kfree(journal_path);
  1488. return -1;
  1489. }
  1490. *journal_devnum = new_encode_dev(journal_inode->i_rdev);
  1491. path_put(&path);
  1492. kfree(journal_path);
  1493. } else if (token == Opt_journal_ioprio) {
  1494. if (arg > 7) {
  1495. ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
  1496. " (must be 0-7)");
  1497. return -1;
  1498. }
  1499. *journal_ioprio =
  1500. IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
  1501. } else if (token == Opt_test_dummy_encryption) {
  1502. #ifdef CONFIG_EXT4_FS_ENCRYPTION
  1503. sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
  1504. ext4_msg(sb, KERN_WARNING,
  1505. "Test dummy encryption mode enabled");
  1506. #else
  1507. ext4_msg(sb, KERN_WARNING,
  1508. "Test dummy encryption mount option ignored");
  1509. #endif
  1510. } else if (m->flags & MOPT_DATAJ) {
  1511. if (is_remount) {
  1512. if (!sbi->s_journal)
  1513. ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
  1514. else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
  1515. ext4_msg(sb, KERN_ERR,
  1516. "Cannot change data mode on remount");
  1517. return -1;
  1518. }
  1519. } else {
  1520. clear_opt(sb, DATA_FLAGS);
  1521. sbi->s_mount_opt |= m->mount_opt;
  1522. }
  1523. #ifdef CONFIG_QUOTA
  1524. } else if (m->flags & MOPT_QFMT) {
  1525. if (sb_any_quota_loaded(sb) &&
  1526. sbi->s_jquota_fmt != m->mount_opt) {
  1527. ext4_msg(sb, KERN_ERR, "Cannot change journaled "
  1528. "quota options when quota turned on");
  1529. return -1;
  1530. }
  1531. if (ext4_has_feature_quota(sb)) {
  1532. ext4_msg(sb, KERN_ERR,
  1533. "Cannot set journaled quota options "
  1534. "when QUOTA feature is enabled");
  1535. return -1;
  1536. }
  1537. sbi->s_jquota_fmt = m->mount_opt;
  1538. #endif
  1539. } else if (token == Opt_dax) {
  1540. #ifdef CONFIG_FS_DAX
  1541. ext4_msg(sb, KERN_WARNING,
  1542. "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
  1543. sbi->s_mount_opt |= m->mount_opt;
  1544. #else
  1545. ext4_msg(sb, KERN_INFO, "dax option not supported");
  1546. return -1;
  1547. #endif
  1548. } else {
  1549. if (!args->from)
  1550. arg = 1;
  1551. if (m->flags & MOPT_CLEAR)
  1552. arg = !arg;
  1553. else if (unlikely(!(m->flags & MOPT_SET))) {
  1554. ext4_msg(sb, KERN_WARNING,
  1555. "buggy handling of option %s", opt);
  1556. WARN_ON(1);
  1557. return -1;
  1558. }
  1559. if (arg != 0)
  1560. sbi->s_mount_opt |= m->mount_opt;
  1561. else
  1562. sbi->s_mount_opt &= ~m->mount_opt;
  1563. }
  1564. return 1;
  1565. }
  1566. static int parse_options(char *options, struct super_block *sb,
  1567. unsigned long *journal_devnum,
  1568. unsigned int *journal_ioprio,
  1569. int is_remount)
  1570. {
  1571. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1572. char *p;
  1573. substring_t args[MAX_OPT_ARGS];
  1574. int token;
  1575. if (!options)
  1576. return 1;
  1577. while ((p = strsep(&options, ",")) != NULL) {
  1578. if (!*p)
  1579. continue;
  1580. /*
  1581. * Initialize args struct so we know whether arg was
  1582. * found; some options take optional arguments.
  1583. */
  1584. args[0].to = args[0].from = NULL;
  1585. token = match_token(p, tokens, args);
  1586. if (handle_mount_opt(sb, p, token, args, journal_devnum,
  1587. journal_ioprio, is_remount) < 0)
  1588. return 0;
  1589. }
  1590. #ifdef CONFIG_QUOTA
  1591. if (ext4_has_feature_quota(sb) &&
  1592. (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
  1593. ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
  1594. "feature is enabled");
  1595. return 0;
  1596. }
  1597. if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
  1598. if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
  1599. clear_opt(sb, USRQUOTA);
  1600. if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
  1601. clear_opt(sb, GRPQUOTA);
  1602. if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
  1603. ext4_msg(sb, KERN_ERR, "old and new quota "
  1604. "format mixing");
  1605. return 0;
  1606. }
  1607. if (!sbi->s_jquota_fmt) {
  1608. ext4_msg(sb, KERN_ERR, "journaled quota format "
  1609. "not specified");
  1610. return 0;
  1611. }
  1612. }
  1613. #endif
  1614. if (test_opt(sb, DIOREAD_NOLOCK)) {
  1615. int blocksize =
  1616. BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
  1617. if (blocksize < PAGE_CACHE_SIZE) {
  1618. ext4_msg(sb, KERN_ERR, "can't mount with "
  1619. "dioread_nolock if block size != PAGE_SIZE");
  1620. return 0;
  1621. }
  1622. }
  1623. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
  1624. test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
  1625. ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
  1626. "in data=ordered mode");
  1627. return 0;
  1628. }
  1629. return 1;
  1630. }
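/*
 * Editorial note (not part of the original source): a rough walk-through of
 * the parsing loop above for a hypothetical option string
 * "data=writeback,barrier=0,commit=60": strsep() splits it at the commas,
 * match_token() matches each piece against tokens[] and fills args[] with
 * the "%u"/"%s" captures (0 and 60 here), and handle_mount_opt() applies
 * each one; any negative return makes parse_options() fail the mount.
 */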
  1631. static inline void ext4_show_quota_options(struct seq_file *seq,
  1632. struct super_block *sb)
  1633. {
  1634. #if defined(CONFIG_QUOTA)
  1635. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1636. if (sbi->s_jquota_fmt) {
  1637. char *fmtname = "";
  1638. switch (sbi->s_jquota_fmt) {
  1639. case QFMT_VFS_OLD:
  1640. fmtname = "vfsold";
  1641. break;
  1642. case QFMT_VFS_V0:
  1643. fmtname = "vfsv0";
  1644. break;
  1645. case QFMT_VFS_V1:
  1646. fmtname = "vfsv1";
  1647. break;
  1648. }
  1649. seq_printf(seq, ",jqfmt=%s", fmtname);
  1650. }
  1651. if (sbi->s_qf_names[USRQUOTA])
  1652. seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
  1653. if (sbi->s_qf_names[GRPQUOTA])
  1654. seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
  1655. #endif
  1656. }
  1657. static const char *token2str(int token)
  1658. {
  1659. const struct match_token *t;
  1660. for (t = tokens; t->token != Opt_err; t++)
  1661. if (t->token == token && !strchr(t->pattern, '='))
  1662. break;
  1663. return t->pattern;
  1664. }
  1665. /*
  1666. * Show an option if
  1667. * - it's set to a non-default value OR
1668. * - the per-sb default is different from the global default
  1669. */
  1670. static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
  1671. int nodefs)
  1672. {
  1673. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1674. struct ext4_super_block *es = sbi->s_es;
  1675. int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
  1676. const struct mount_opts *m;
  1677. char sep = nodefs ? '\n' : ',';
  1678. #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
  1679. #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
  1680. if (sbi->s_sb_block != 1)
  1681. SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
  1682. for (m = ext4_mount_opts; m->token != Opt_err; m++) {
  1683. int want_set = m->flags & MOPT_SET;
  1684. if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
  1685. (m->flags & MOPT_CLEAR_ERR))
  1686. continue;
  1687. if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
  1688. continue; /* skip if same as the default */
  1689. if ((want_set &&
  1690. (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
  1691. (!want_set && (sbi->s_mount_opt & m->mount_opt)))
  1692. continue; /* select Opt_noFoo vs Opt_Foo */
  1693. SEQ_OPTS_PRINT("%s", token2str(m->token));
  1694. }
  1695. if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
  1696. le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
  1697. SEQ_OPTS_PRINT("resuid=%u",
  1698. from_kuid_munged(&init_user_ns, sbi->s_resuid));
  1699. if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
  1700. le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
  1701. SEQ_OPTS_PRINT("resgid=%u",
  1702. from_kgid_munged(&init_user_ns, sbi->s_resgid));
  1703. def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
  1704. if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
  1705. SEQ_OPTS_PUTS("errors=remount-ro");
  1706. if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
  1707. SEQ_OPTS_PUTS("errors=continue");
  1708. if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
  1709. SEQ_OPTS_PUTS("errors=panic");
  1710. if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
  1711. SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
  1712. if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
  1713. SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
  1714. if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
  1715. SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
  1716. if (sb->s_flags & MS_I_VERSION)
  1717. SEQ_OPTS_PUTS("i_version");
  1718. if (nodefs || sbi->s_stripe)
  1719. SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
  1720. if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
  1721. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
  1722. SEQ_OPTS_PUTS("data=journal");
  1723. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
  1724. SEQ_OPTS_PUTS("data=ordered");
  1725. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
  1726. SEQ_OPTS_PUTS("data=writeback");
  1727. }
  1728. if (nodefs ||
  1729. sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
  1730. SEQ_OPTS_PRINT("inode_readahead_blks=%u",
  1731. sbi->s_inode_readahead_blks);
  1732. if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
  1733. (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
  1734. SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
  1735. if (nodefs || sbi->s_max_dir_size_kb)
  1736. SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
  1737. ext4_show_quota_options(seq, sb);
  1738. return 0;
  1739. }
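/*
 * Editorial note (not part of the original source): for a filesystem
 * mounted with, say, "data=journal,commit=60,stripe=64" (where those values
 * differ from the defaults), the checks above would append roughly
 * ",commit=60,stripe=64,data=journal" to the mount line shown in
 * /proc/mounts; ext4_seq_options_show() below reuses the same helper with
 * nodefs=1 and a newline separator to list options explicitly.
 */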
  1740. static int ext4_show_options(struct seq_file *seq, struct dentry *root)
  1741. {
  1742. return _ext4_show_options(seq, root->d_sb, 0);
  1743. }
  1744. int ext4_seq_options_show(struct seq_file *seq, void *offset)
  1745. {
  1746. struct super_block *sb = seq->private;
  1747. int rc;
  1748. seq_puts(seq, (sb->s_flags & MS_RDONLY) ? "ro" : "rw");
  1749. rc = _ext4_show_options(seq, sb, 1);
  1750. seq_puts(seq, "\n");
  1751. return rc;
  1752. }
  1753. static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
  1754. int read_only)
  1755. {
  1756. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1757. int res = 0;
  1758. if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
  1759. ext4_msg(sb, KERN_ERR, "revision level too high, "
  1760. "forcing read-only mode");
  1761. res = MS_RDONLY;
  1762. }
  1763. if (read_only)
  1764. goto done;
  1765. if (!(sbi->s_mount_state & EXT4_VALID_FS))
  1766. ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
  1767. "running e2fsck is recommended");
  1768. else if (sbi->s_mount_state & EXT4_ERROR_FS)
  1769. ext4_msg(sb, KERN_WARNING,
  1770. "warning: mounting fs with errors, "
  1771. "running e2fsck is recommended");
  1772. else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
  1773. le16_to_cpu(es->s_mnt_count) >=
  1774. (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
  1775. ext4_msg(sb, KERN_WARNING,
  1776. "warning: maximal mount count reached, "
  1777. "running e2fsck is recommended");
  1778. else if (le32_to_cpu(es->s_checkinterval) &&
  1779. (le32_to_cpu(es->s_lastcheck) +
  1780. le32_to_cpu(es->s_checkinterval) <= get_seconds()))
  1781. ext4_msg(sb, KERN_WARNING,
  1782. "warning: checktime reached, "
  1783. "running e2fsck is recommended");
  1784. if (!sbi->s_journal)
  1785. es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
  1786. if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
  1787. es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
  1788. le16_add_cpu(&es->s_mnt_count, 1);
  1789. es->s_mtime = cpu_to_le32(get_seconds());
  1790. ext4_update_dynamic_rev(sb);
  1791. if (sbi->s_journal)
  1792. ext4_set_feature_journal_needs_recovery(sb);
  1793. ext4_commit_super(sb, 1);
  1794. done:
  1795. if (test_opt(sb, DEBUG))
  1796. printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
  1797. "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
  1798. sb->s_blocksize,
  1799. sbi->s_groups_count,
  1800. EXT4_BLOCKS_PER_GROUP(sb),
  1801. EXT4_INODES_PER_GROUP(sb),
  1802. sbi->s_mount_opt, sbi->s_mount_opt2);
  1803. cleancache_init_fs(sb);
  1804. return res;
  1805. }
  1806. int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
  1807. {
  1808. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1809. struct flex_groups *new_groups;
  1810. int size;
  1811. if (!sbi->s_log_groups_per_flex)
  1812. return 0;
  1813. size = ext4_flex_group(sbi, ngroup - 1) + 1;
  1814. if (size <= sbi->s_flex_groups_allocated)
  1815. return 0;
  1816. size = roundup_pow_of_two(size * sizeof(struct flex_groups));
  1817. new_groups = ext4_kvzalloc(size, GFP_KERNEL);
  1818. if (!new_groups) {
  1819. ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
  1820. size / (int) sizeof(struct flex_groups));
  1821. return -ENOMEM;
  1822. }
  1823. if (sbi->s_flex_groups) {
  1824. memcpy(new_groups, sbi->s_flex_groups,
  1825. (sbi->s_flex_groups_allocated *
  1826. sizeof(struct flex_groups)));
  1827. kvfree(sbi->s_flex_groups);
  1828. }
  1829. sbi->s_flex_groups = new_groups;
  1830. sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
  1831. return 0;
  1832. }
  1833. static int ext4_fill_flex_info(struct super_block *sb)
  1834. {
  1835. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1836. struct ext4_group_desc *gdp = NULL;
  1837. ext4_group_t flex_group;
  1838. int i, err;
  1839. sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
  1840. if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
  1841. sbi->s_log_groups_per_flex = 0;
  1842. return 1;
  1843. }
  1844. err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
  1845. if (err)
  1846. goto failed;
  1847. for (i = 0; i < sbi->s_groups_count; i++) {
  1848. gdp = ext4_get_group_desc(sb, i, NULL);
  1849. flex_group = ext4_flex_group(sbi, i);
  1850. atomic_add(ext4_free_inodes_count(sb, gdp),
  1851. &sbi->s_flex_groups[flex_group].free_inodes);
  1852. atomic64_add(ext4_free_group_clusters(sb, gdp),
  1853. &sbi->s_flex_groups[flex_group].free_clusters);
  1854. atomic_add(ext4_used_dirs_count(sb, gdp),
  1855. &sbi->s_flex_groups[flex_group].used_dirs);
  1856. }
  1857. return 1;
  1858. failed:
  1859. return 0;
  1860. }
  1861. static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
  1862. struct ext4_group_desc *gdp)
  1863. {
  1864. int offset;
  1865. __u16 crc = 0;
  1866. __le32 le_group = cpu_to_le32(block_group);
  1867. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1868. if (ext4_has_metadata_csum(sbi->s_sb)) {
  1869. /* Use new metadata_csum algorithm */
  1870. __le16 save_csum;
  1871. __u32 csum32;
  1872. save_csum = gdp->bg_checksum;
  1873. gdp->bg_checksum = 0;
  1874. csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
  1875. sizeof(le_group));
  1876. csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
  1877. sbi->s_desc_size);
  1878. gdp->bg_checksum = save_csum;
  1879. crc = csum32 & 0xFFFF;
  1880. goto out;
  1881. }
  1882. /* old crc16 code */
  1883. if (!ext4_has_feature_gdt_csum(sb))
  1884. return 0;
  1885. offset = offsetof(struct ext4_group_desc, bg_checksum);
  1886. crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
  1887. crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
  1888. crc = crc16(crc, (__u8 *)gdp, offset);
  1889. offset += sizeof(gdp->bg_checksum); /* skip checksum */
1890. /* checksum the rest of struct ext4_group_desc */
  1891. if (ext4_has_feature_64bit(sb) &&
  1892. offset < le16_to_cpu(sbi->s_es->s_desc_size))
  1893. crc = crc16(crc, (__u8 *)gdp + offset,
  1894. le16_to_cpu(sbi->s_es->s_desc_size) -
  1895. offset);
  1896. out:
  1897. return cpu_to_le16(crc);
  1898. }
  1899. int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
  1900. struct ext4_group_desc *gdp)
  1901. {
  1902. if (ext4_has_group_desc_csum(sb) &&
  1903. (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
  1904. return 0;
  1905. return 1;
  1906. }
  1907. void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
  1908. struct ext4_group_desc *gdp)
  1909. {
  1910. if (!ext4_has_group_desc_csum(sb))
  1911. return;
  1912. gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
  1913. }
  1914. /* Called at mount-time, super-block is locked */
  1915. static int ext4_check_descriptors(struct super_block *sb,
  1916. ext4_group_t *first_not_zeroed)
  1917. {
  1918. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1919. ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
  1920. ext4_fsblk_t last_block;
  1921. ext4_fsblk_t block_bitmap;
  1922. ext4_fsblk_t inode_bitmap;
  1923. ext4_fsblk_t inode_table;
  1924. int flexbg_flag = 0;
  1925. ext4_group_t i, grp = sbi->s_groups_count;
  1926. if (ext4_has_feature_flex_bg(sb))
  1927. flexbg_flag = 1;
  1928. ext4_debug("Checking group descriptors");
  1929. for (i = 0; i < sbi->s_groups_count; i++) {
  1930. struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
  1931. if (i == sbi->s_groups_count - 1 || flexbg_flag)
  1932. last_block = ext4_blocks_count(sbi->s_es) - 1;
  1933. else
  1934. last_block = first_block +
  1935. (EXT4_BLOCKS_PER_GROUP(sb) - 1);
  1936. if ((grp == sbi->s_groups_count) &&
  1937. !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
  1938. grp = i;
  1939. block_bitmap = ext4_block_bitmap(sb, gdp);
  1940. if (block_bitmap < first_block || block_bitmap > last_block) {
  1941. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  1942. "Block bitmap for group %u not in group "
  1943. "(block %llu)!", i, block_bitmap);
  1944. return 0;
  1945. }
  1946. inode_bitmap = ext4_inode_bitmap(sb, gdp);
  1947. if (inode_bitmap < first_block || inode_bitmap > last_block) {
  1948. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  1949. "Inode bitmap for group %u not in group "
  1950. "(block %llu)!", i, inode_bitmap);
  1951. return 0;
  1952. }
  1953. inode_table = ext4_inode_table(sb, gdp);
  1954. if (inode_table < first_block ||
  1955. inode_table + sbi->s_itb_per_group - 1 > last_block) {
  1956. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  1957. "Inode table for group %u not in group "
  1958. "(block %llu)!", i, inode_table);
  1959. return 0;
  1960. }
  1961. ext4_lock_group(sb, i);
  1962. if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
  1963. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  1964. "Checksum for group %u failed (%u!=%u)",
  1965. i, le16_to_cpu(ext4_group_desc_csum(sb, i,
  1966. gdp)), le16_to_cpu(gdp->bg_checksum));
  1967. if (!(sb->s_flags & MS_RDONLY)) {
  1968. ext4_unlock_group(sb, i);
  1969. return 0;
  1970. }
  1971. }
  1972. ext4_unlock_group(sb, i);
  1973. if (!flexbg_flag)
  1974. first_block += EXT4_BLOCKS_PER_GROUP(sb);
  1975. }
  1976. if (NULL != first_not_zeroed)
  1977. *first_not_zeroed = grp;
  1978. return 1;
  1979. }
  1980. /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
  1981. * the superblock) which were deleted from all directories, but held open by
  1982. * a process at the time of a crash. We walk the list and try to delete these
  1983. * inodes at recovery time (only with a read-write filesystem).
  1984. *
  1985. * In order to keep the orphan inode chain consistent during traversal (in
  1986. * case of crash during recovery), we link each inode into the superblock
  1987. * orphan list_head and handle it the same way as an inode deletion during
  1988. * normal operation (which journals the operations for us).
  1989. *
  1990. * We only do an iget() and an iput() on each inode, which is very safe if we
  1991. * accidentally point at an in-use or already deleted inode. The worst that
  1992. * can happen in this case is that we get a "bit already cleared" message from
  1993. * ext4_free_inode(). The only reason we would point at a wrong inode is if
  1994. * e2fsck was run on this filesystem, and it must have already done the orphan
  1995. * inode cleanup for us, so we can safely abort without any further action.
  1996. */
  1997. static void ext4_orphan_cleanup(struct super_block *sb,
  1998. struct ext4_super_block *es)
  1999. {
  2000. unsigned int s_flags = sb->s_flags;
  2001. int nr_orphans = 0, nr_truncates = 0;
  2002. #ifdef CONFIG_QUOTA
  2003. int i;
  2004. #endif
  2005. if (!es->s_last_orphan) {
  2006. jbd_debug(4, "no orphan inodes to clean up\n");
  2007. return;
  2008. }
  2009. if (bdev_read_only(sb->s_bdev)) {
  2010. ext4_msg(sb, KERN_ERR, "write access "
  2011. "unavailable, skipping orphan cleanup");
  2012. return;
  2013. }
2014. /* Check whether the feature set allows a read-write mount */
  2015. if (!ext4_feature_set_ok(sb, 0)) {
  2016. ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
  2017. "unknown ROCOMPAT features");
  2018. return;
  2019. }
  2020. if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
  2021. /* don't clear list on RO mount w/ errors */
  2022. if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
  2023. ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
  2024. "clearing orphan list.\n");
  2025. es->s_last_orphan = 0;
  2026. }
  2027. jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
  2028. return;
  2029. }
  2030. if (s_flags & MS_RDONLY) {
  2031. ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
  2032. sb->s_flags &= ~MS_RDONLY;
  2033. }
  2034. #ifdef CONFIG_QUOTA
  2035. /* Needed for iput() to work correctly and not trash data */
  2036. sb->s_flags |= MS_ACTIVE;
  2037. /* Turn on quotas so that they are updated correctly */
  2038. for (i = 0; i < EXT4_MAXQUOTAS; i++) {
  2039. if (EXT4_SB(sb)->s_qf_names[i]) {
  2040. int ret = ext4_quota_on_mount(sb, i);
  2041. if (ret < 0)
  2042. ext4_msg(sb, KERN_ERR,
  2043. "Cannot turn on journaled "
  2044. "quota: error %d", ret);
  2045. }
  2046. }
  2047. #endif
  2048. while (es->s_last_orphan) {
  2049. struct inode *inode;
  2050. inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
  2051. if (IS_ERR(inode)) {
  2052. es->s_last_orphan = 0;
  2053. break;
  2054. }
  2055. list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
  2056. dquot_initialize(inode);
  2057. if (inode->i_nlink) {
  2058. if (test_opt(sb, DEBUG))
  2059. ext4_msg(sb, KERN_DEBUG,
  2060. "%s: truncating inode %lu to %lld bytes",
  2061. __func__, inode->i_ino, inode->i_size);
  2062. jbd_debug(2, "truncating inode %lu to %lld bytes\n",
  2063. inode->i_ino, inode->i_size);
  2064. inode_lock(inode);
  2065. truncate_inode_pages(inode->i_mapping, inode->i_size);
  2066. ext4_truncate(inode);
  2067. inode_unlock(inode);
  2068. nr_truncates++;
  2069. } else {
  2070. if (test_opt(sb, DEBUG))
  2071. ext4_msg(sb, KERN_DEBUG,
  2072. "%s: deleting unreferenced inode %lu",
  2073. __func__, inode->i_ino);
  2074. jbd_debug(2, "deleting unreferenced inode %lu\n",
  2075. inode->i_ino);
  2076. nr_orphans++;
  2077. }
  2078. iput(inode); /* The delete magic happens here! */
  2079. }
  2080. #define PLURAL(x) (x), ((x) == 1) ? "" : "s"
  2081. if (nr_orphans)
  2082. ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
  2083. PLURAL(nr_orphans));
  2084. if (nr_truncates)
  2085. ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
  2086. PLURAL(nr_truncates));
  2087. #ifdef CONFIG_QUOTA
  2088. /* Turn quotas off */
  2089. for (i = 0; i < EXT4_MAXQUOTAS; i++) {
  2090. if (sb_dqopt(sb)->files[i])
  2091. dquot_quota_off(sb, i);
  2092. }
  2093. #endif
  2094. sb->s_flags = s_flags; /* Restore MS_RDONLY status */
  2095. }
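/*
 * Editorial sketch (not part of the original source) of the on-disk orphan
 * list walked above: the superblock's s_last_orphan holds the inode number
 * of the most recently orphaned inode, and each orphan's on-disk i_dtime
 * field is reused as a "next" pointer, e.g.
 *
 *   s_last_orphan -> ino 1234 (i_dtime = 987) -> ino 987 (i_dtime = 0)
 *
 * ext4_orphan_get() reads the head of the chain; the loop truncates inodes
 * that are still linked and lets iput() delete unlinked ones, and in both
 * cases the inode is removed from the chain, so s_last_orphan advances and
 * the loop converges.
 */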
  2096. /*
  2097. * Maximal extent format file size.
  2098. * Resulting logical blkno at s_maxbytes must fit in our on-disk
  2099. * extent format containers, within a sector_t, and within i_blocks
  2100. * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
  2101. * so that won't be a limiting factor.
  2102. *
2103. * However, there is another limiting factor: extents are stored as a
2104. * starting block plus a length, so the length of an extent covering the
2105. * maximum file size must also fit into the on-disk format containers.
2106. * Since a length is always one unit larger than the highest offset it
2107. * covers (block 0 counts too), we have to lower s_maxbytes by one fs block.
  2108. *
  2109. * Note, this does *not* consider any metadata overhead for vfs i_blocks.
  2110. */
  2111. static loff_t ext4_max_size(int blkbits, int has_huge_files)
  2112. {
  2113. loff_t res;
  2114. loff_t upper_limit = MAX_LFS_FILESIZE;
  2115. /* small i_blocks in vfs inode? */
  2116. if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
  2117. /*
2118. * !has_huge_files or CONFIG_LBDAF not enabled implies that
2119. * the inode's i_blocks counts 512-byte sectors and is limited
2120. * to 2^32 sectors (32 == size of vfs inode i_blocks * 8)
  2121. */
  2122. upper_limit = (1LL << 32) - 1;
  2123. /* total blocks in file system block size */
  2124. upper_limit >>= (blkbits - 9);
  2125. upper_limit <<= blkbits;
  2126. }
  2127. /*
  2128. * 32-bit extent-start container, ee_block. We lower the maxbytes
  2129. * by one fs block, so ee_len can cover the extent of maximum file
  2130. * size
  2131. */
  2132. res = (1LL << 32) - 1;
  2133. res <<= blkbits;
  2134. /* Sanity check against vm- & vfs- imposed limits */
  2135. if (res > upper_limit)
  2136. res = upper_limit;
  2137. return res;
  2138. }
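/*
 * Editorial worked example (not part of the original source): for 4 KiB
 * blocks (blkbits == 12) on a kernel with a 64-bit blkcnt_t and huge_file
 * enabled, upper_limit stays at MAX_LFS_FILESIZE and
 *
 *   res = (2^32 - 1) << 12 = 2^44 - 2^12 bytes,
 *
 * i.e. 16 TiB minus one filesystem block, the familiar ext4 extent-mapped
 * file size limit. Without huge_file (512-byte i_blocks units) the same
 * math caps the result at roughly 2 TiB instead.
 */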
  2139. /*
  2140. * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
  2141. * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
  2142. * We need to be 1 filesystem block less than the 2^48 sector limit.
  2143. */
  2144. static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
  2145. {
  2146. loff_t res = EXT4_NDIR_BLOCKS;
  2147. int meta_blocks;
  2148. loff_t upper_limit;
  2149. /* This is calculated to be the largest file size for a dense, block
  2150. * mapped file such that the file's total number of 512-byte sectors,
  2151. * including data and all indirect blocks, does not exceed (2^48 - 1).
  2152. *
2153. * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
  2154. * number of 512-byte sectors of the file.
  2155. */
  2156. if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
  2157. /*
  2158. * !has_huge_files or CONFIG_LBDAF not enabled implies that
  2159. * the inode i_block field represents total file blocks in
  2160. * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
  2161. */
  2162. upper_limit = (1LL << 32) - 1;
  2163. /* total blocks in file system block size */
  2164. upper_limit >>= (bits - 9);
  2165. } else {
  2166. /*
2167. * We use the 48-bit ext4_inode i_blocks.
2168. * With EXT4_HUGE_FILE_FL set, i_blocks
2169. * represents the total number of blocks in
2170. * filesystem block units
  2171. */
  2172. upper_limit = (1LL << 48) - 1;
  2173. }
  2174. /* indirect blocks */
  2175. meta_blocks = 1;
  2176. /* double indirect blocks */
  2177. meta_blocks += 1 + (1LL << (bits-2));
2178. /* triple indirect blocks */
  2179. meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
  2180. upper_limit -= meta_blocks;
  2181. upper_limit <<= bits;
  2182. res += 1LL << (bits-2);
  2183. res += 1LL << (2*(bits-2));
  2184. res += 1LL << (3*(bits-2));
  2185. res <<= bits;
  2186. if (res > upper_limit)
  2187. res = upper_limit;
  2188. if (res > MAX_LFS_FILESIZE)
  2189. res = MAX_LFS_FILESIZE;
  2190. return res;
  2191. }
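/*
 * Editorial worked example (not part of the original source): for 4 KiB
 * blocks (bits == 12, so bits - 2 == 10) the block-mapped limit above is
 *
 *   res = (12 + 2^10 + 2^20 + 2^30) blocks * 4 KiB ~= 4 TiB,
 *
 * well below the 2^48-sector i_blocks limit, which is why indirect-mapped
 * ext4 files top out around 4 TiB while extent-mapped files can reach
 * 16 TiB at the same block size.
 */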
  2192. static ext4_fsblk_t descriptor_loc(struct super_block *sb,
  2193. ext4_fsblk_t logical_sb_block, int nr)
  2194. {
  2195. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2196. ext4_group_t bg, first_meta_bg;
  2197. int has_super = 0;
  2198. first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
  2199. if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
  2200. return logical_sb_block + nr + 1;
  2201. bg = sbi->s_desc_per_block * nr;
  2202. if (ext4_bg_has_super(sb, bg))
  2203. has_super = 1;
  2204. /*
  2205. * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
  2206. * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled
  2207. * on modern mke2fs or blksize > 1k on older mke2fs) then we must
  2208. * compensate.
  2209. */
  2210. if (sb->s_blocksize == 1024 && nr == 0 &&
  2211. le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
  2212. has_super++;
  2213. return (has_super + ext4_group_first_block_no(sb, bg));
  2214. }
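/*
 * Editorial note (not part of the original source): without meta_bg (or for
 * descriptor blocks below s_first_meta_bg) the group descriptor table simply
 * follows the primary superblock, so descriptor block nr lives at
 * logical_sb_block + nr + 1. With meta_bg, descriptor block nr is owned by
 * block group (s_desc_per_block * nr) and sits at the first block of that
 * group, one block later if the group also carries a backup superblock.
 */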
  2215. /**
  2216. * ext4_get_stripe_size: Get the stripe size.
  2217. * @sbi: In memory super block info
  2218. *
2219. * If a stripe size was specified via the mount option, use that value.
2220. * If the value specified at mount time is greater than the blocks per
2221. * group, fall back to the super block values (stripe-width, then stride).
2222. * If the super block values are also greater than blocks per group, return 0.
2223. * The allocator needs it to be less than blocks per group.
  2224. *
  2225. */
  2226. static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
  2227. {
  2228. unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
  2229. unsigned long stripe_width =
  2230. le32_to_cpu(sbi->s_es->s_raid_stripe_width);
  2231. int ret;
  2232. if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
  2233. ret = sbi->s_stripe;
  2234. else if (stripe_width <= sbi->s_blocks_per_group)
  2235. ret = stripe_width;
  2236. else if (stride <= sbi->s_blocks_per_group)
  2237. ret = stride;
  2238. else
  2239. ret = 0;
  2240. /*
  2241. * If the stripe width is 1, this makes no sense and
  2242. * we set it to 0 to turn off stripe handling code.
  2243. */
  2244. if (ret <= 1)
  2245. ret = 0;
  2246. return ret;
  2247. }
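/*
 * Editorial example (not part of the original source): with no stripe=
 * mount option, s_raid_stride == 16, s_raid_stripe_width == 128 and 32768
 * blocks per group, the function above returns 128 (stripe-width is
 * preferred over stride); a stripe= value given at mount time would win
 * over both, as long as it does not exceed blocks per group.
 */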
  2248. /*
  2249. * Check whether this filesystem can be mounted based on
  2250. * the features present and the RDONLY/RDWR mount requested.
  2251. * Returns 1 if this filesystem can be mounted as requested,
  2252. * 0 if it cannot be.
  2253. */
  2254. static int ext4_feature_set_ok(struct super_block *sb, int readonly)
  2255. {
  2256. if (ext4_has_unknown_ext4_incompat_features(sb)) {
  2257. ext4_msg(sb, KERN_ERR,
  2258. "Couldn't mount because of "
  2259. "unsupported optional features (%x)",
  2260. (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
  2261. ~EXT4_FEATURE_INCOMPAT_SUPP));
  2262. return 0;
  2263. }
  2264. if (readonly)
  2265. return 1;
  2266. if (ext4_has_feature_readonly(sb)) {
  2267. ext4_msg(sb, KERN_INFO, "filesystem is read-only");
  2268. sb->s_flags |= MS_RDONLY;
  2269. return 1;
  2270. }
  2271. /* Check that feature set is OK for a read-write mount */
  2272. if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
  2273. ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
  2274. "unsupported optional features (%x)",
  2275. (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
  2276. ~EXT4_FEATURE_RO_COMPAT_SUPP));
  2277. return 0;
  2278. }
  2279. /*
2280. * A filesystem with the huge_file feature can only be mounted
2281. * read-write on 32-bit systems if the kernel is built with CONFIG_LBDAF
  2282. */
  2283. if (ext4_has_feature_huge_file(sb)) {
  2284. if (sizeof(blkcnt_t) < sizeof(u64)) {
  2285. ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
  2286. "cannot be mounted RDWR without "
  2287. "CONFIG_LBDAF");
  2288. return 0;
  2289. }
  2290. }
  2291. if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
  2292. ext4_msg(sb, KERN_ERR,
  2293. "Can't support bigalloc feature without "
  2294. "extents feature\n");
  2295. return 0;
  2296. }
  2297. #ifndef CONFIG_QUOTA
  2298. if (ext4_has_feature_quota(sb) && !readonly) {
  2299. ext4_msg(sb, KERN_ERR,
  2300. "Filesystem with quota feature cannot be mounted RDWR "
  2301. "without CONFIG_QUOTA");
  2302. return 0;
  2303. }
  2304. if (ext4_has_feature_project(sb) && !readonly) {
  2305. ext4_msg(sb, KERN_ERR,
  2306. "Filesystem with project quota feature cannot be mounted RDWR "
  2307. "without CONFIG_QUOTA");
  2308. return 0;
  2309. }
  2310. #endif /* CONFIG_QUOTA */
  2311. return 1;
  2312. }
  2313. /*
  2314. * This function is called once a day if we have errors logged
  2315. * on the file system
  2316. */
  2317. static void print_daily_error_info(unsigned long arg)
  2318. {
  2319. struct super_block *sb = (struct super_block *) arg;
  2320. struct ext4_sb_info *sbi;
  2321. struct ext4_super_block *es;
  2322. sbi = EXT4_SB(sb);
  2323. es = sbi->s_es;
  2324. if (es->s_error_count)
  2325. /* fsck newer than v1.41.13 is needed to clean this condition. */
  2326. ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
  2327. le32_to_cpu(es->s_error_count));
  2328. if (es->s_first_error_time) {
  2329. printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
  2330. sb->s_id, le32_to_cpu(es->s_first_error_time),
  2331. (int) sizeof(es->s_first_error_func),
  2332. es->s_first_error_func,
  2333. le32_to_cpu(es->s_first_error_line));
  2334. if (es->s_first_error_ino)
  2335. printk(": inode %u",
  2336. le32_to_cpu(es->s_first_error_ino));
  2337. if (es->s_first_error_block)
  2338. printk(": block %llu", (unsigned long long)
  2339. le64_to_cpu(es->s_first_error_block));
  2340. printk("\n");
  2341. }
  2342. if (es->s_last_error_time) {
  2343. printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
  2344. sb->s_id, le32_to_cpu(es->s_last_error_time),
  2345. (int) sizeof(es->s_last_error_func),
  2346. es->s_last_error_func,
  2347. le32_to_cpu(es->s_last_error_line));
  2348. if (es->s_last_error_ino)
  2349. printk(": inode %u",
  2350. le32_to_cpu(es->s_last_error_ino));
  2351. if (es->s_last_error_block)
  2352. printk(": block %llu", (unsigned long long)
  2353. le64_to_cpu(es->s_last_error_block));
  2354. printk("\n");
  2355. }
  2356. mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
  2357. }
  2358. /* Find next suitable group and run ext4_init_inode_table */
  2359. static int ext4_run_li_request(struct ext4_li_request *elr)
  2360. {
  2361. struct ext4_group_desc *gdp = NULL;
  2362. ext4_group_t group, ngroups;
  2363. struct super_block *sb;
  2364. unsigned long timeout = 0;
  2365. int ret = 0;
  2366. sb = elr->lr_super;
  2367. ngroups = EXT4_SB(sb)->s_groups_count;
  2368. sb_start_write(sb);
  2369. for (group = elr->lr_next_group; group < ngroups; group++) {
  2370. gdp = ext4_get_group_desc(sb, group, NULL);
  2371. if (!gdp) {
  2372. ret = 1;
  2373. break;
  2374. }
  2375. if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
  2376. break;
  2377. }
  2378. if (group >= ngroups)
  2379. ret = 1;
  2380. if (!ret) {
  2381. timeout = jiffies;
  2382. ret = ext4_init_inode_table(sb, group,
  2383. elr->lr_timeout ? 0 : 1);
  2384. if (elr->lr_timeout == 0) {
  2385. timeout = (jiffies - timeout) *
  2386. elr->lr_sbi->s_li_wait_mult;
  2387. elr->lr_timeout = timeout;
  2388. }
  2389. elr->lr_next_sched = jiffies + elr->lr_timeout;
  2390. elr->lr_next_group = group + 1;
  2391. }
  2392. sb_end_write(sb);
  2393. return ret;
  2394. }
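/*
 * Editorial note (not part of the original source): the lr_timeout scaling
 * above is what keeps lazy inode-table zeroing unobtrusive. The first pass
 * for a request is timed, and every later pass is scheduled that elapsed
 * time multiplied by s_li_wait_mult (EXT4_DEF_LI_WAIT_MULT, 10 by default)
 * into the future; e.g. if zeroing one group's inode table takes ~20 ms,
 * the next group is attempted roughly 200 ms later, keeping the thread to
 * about a 10% duty cycle on an otherwise idle device.
 */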
  2395. /*
2396. * Remove the request from the request list and free the
2397. * request structure. Should be called with li_list_mtx held.
  2398. */
  2399. static void ext4_remove_li_request(struct ext4_li_request *elr)
  2400. {
  2401. struct ext4_sb_info *sbi;
  2402. if (!elr)
  2403. return;
  2404. sbi = elr->lr_sbi;
  2405. list_del(&elr->lr_request);
  2406. sbi->s_li_request = NULL;
  2407. kfree(elr);
  2408. }
  2409. static void ext4_unregister_li_request(struct super_block *sb)
  2410. {
  2411. mutex_lock(&ext4_li_mtx);
  2412. if (!ext4_li_info) {
  2413. mutex_unlock(&ext4_li_mtx);
  2414. return;
  2415. }
  2416. mutex_lock(&ext4_li_info->li_list_mtx);
  2417. ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
  2418. mutex_unlock(&ext4_li_info->li_list_mtx);
  2419. mutex_unlock(&ext4_li_mtx);
  2420. }
  2421. static struct task_struct *ext4_lazyinit_task;
  2422. /*
2423. * This is the function where the ext4lazyinit thread lives. It walks
2424. * through the request list searching for the next scheduled filesystem.
2425. * When such a fs is found, run the lazy initialization request
2426. * (ext4_run_li_request) and keep track of the time spent in this
2427. * function. Based on that time we compute the next schedule time of
2428. * the request. When the walk through the list is complete, compute the
2429. * next wakeup time and put the thread to sleep.
  2430. */
  2431. static int ext4_lazyinit_thread(void *arg)
  2432. {
  2433. struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
  2434. struct list_head *pos, *n;
  2435. struct ext4_li_request *elr;
  2436. unsigned long next_wakeup, cur;
  2437. BUG_ON(NULL == eli);
  2438. cont_thread:
  2439. while (true) {
  2440. next_wakeup = MAX_JIFFY_OFFSET;
  2441. mutex_lock(&eli->li_list_mtx);
  2442. if (list_empty(&eli->li_request_list)) {
  2443. mutex_unlock(&eli->li_list_mtx);
  2444. goto exit_thread;
  2445. }
  2446. list_for_each_safe(pos, n, &eli->li_request_list) {
  2447. elr = list_entry(pos, struct ext4_li_request,
  2448. lr_request);
  2449. if (time_after_eq(jiffies, elr->lr_next_sched)) {
  2450. if (ext4_run_li_request(elr) != 0) {
  2451. /* error, remove the lazy_init job */
  2452. ext4_remove_li_request(elr);
  2453. continue;
  2454. }
  2455. }
  2456. if (time_before(elr->lr_next_sched, next_wakeup))
  2457. next_wakeup = elr->lr_next_sched;
  2458. }
  2459. mutex_unlock(&eli->li_list_mtx);
  2460. try_to_freeze();
  2461. cur = jiffies;
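/*
 * If the earliest request is already due, or nothing is scheduled
 * at all, go around the loop again instead of sleeping.
 */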
  2462. if ((time_after_eq(cur, next_wakeup)) ||
  2463. (MAX_JIFFY_OFFSET == next_wakeup)) {
  2464. cond_resched();
  2465. continue;
  2466. }
  2467. schedule_timeout_interruptible(next_wakeup - cur);
  2468. if (kthread_should_stop()) {
  2469. ext4_clear_request_list();
  2470. goto exit_thread;
  2471. }
  2472. }
  2473. exit_thread:
  2474. /*
  2475. * It looks like the request list is empty, but we need
  2476. * to check it under the li_list_mtx lock, to prevent any
  2477. * additions into it, and of course we should lock ext4_li_mtx
  2478. * to atomically free the list and ext4_li_info, because at
2479. * this point another ext4 filesystem could be registering a
2480. * new one.
  2481. */
  2482. mutex_lock(&ext4_li_mtx);
  2483. mutex_lock(&eli->li_list_mtx);
  2484. if (!list_empty(&eli->li_request_list)) {
  2485. mutex_unlock(&eli->li_list_mtx);
  2486. mutex_unlock(&ext4_li_mtx);
  2487. goto cont_thread;
  2488. }
  2489. mutex_unlock(&eli->li_list_mtx);
  2490. kfree(ext4_li_info);
  2491. ext4_li_info = NULL;
  2492. mutex_unlock(&ext4_li_mtx);
  2493. return 0;
  2494. }
  2495. static void ext4_clear_request_list(void)
  2496. {
  2497. struct list_head *pos, *n;
  2498. struct ext4_li_request *elr;
  2499. mutex_lock(&ext4_li_info->li_list_mtx);
  2500. list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
  2501. elr = list_entry(pos, struct ext4_li_request,
  2502. lr_request);
  2503. ext4_remove_li_request(elr);
  2504. }
  2505. mutex_unlock(&ext4_li_info->li_list_mtx);
  2506. }
  2507. static int ext4_run_lazyinit_thread(void)
  2508. {
  2509. ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
  2510. ext4_li_info, "ext4lazyinit");
  2511. if (IS_ERR(ext4_lazyinit_task)) {
  2512. int err = PTR_ERR(ext4_lazyinit_task);
  2513. ext4_clear_request_list();
  2514. kfree(ext4_li_info);
  2515. ext4_li_info = NULL;
  2516. printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
  2517. "initialization thread\n",
  2518. err);
  2519. return err;
  2520. }
  2521. ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
  2522. return 0;
  2523. }
  2524. /*
2525. * Check whether it makes sense to run the itable init thread or not.
2526. * If there is at least one uninitialized inode table, return the
2527. * corresponding group number; otherwise the loop goes through all
2528. * groups and returns the total number of groups.
  2529. */
  2530. static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
  2531. {
  2532. ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
  2533. struct ext4_group_desc *gdp = NULL;
  2534. for (group = 0; group < ngroups; group++) {
  2535. gdp = ext4_get_group_desc(sb, group, NULL);
  2536. if (!gdp)
  2537. continue;
  2538. if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
  2539. break;
  2540. }
  2541. return group;
  2542. }
  2543. static int ext4_li_info_new(void)
  2544. {
  2545. struct ext4_lazy_init *eli = NULL;
  2546. eli = kzalloc(sizeof(*eli), GFP_KERNEL);
  2547. if (!eli)
  2548. return -ENOMEM;
  2549. INIT_LIST_HEAD(&eli->li_request_list);
  2550. mutex_init(&eli->li_list_mtx);
  2551. eli->li_state |= EXT4_LAZYINIT_QUIT;
  2552. ext4_li_info = eli;
  2553. return 0;
  2554. }
  2555. static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
  2556. ext4_group_t start)
  2557. {
  2558. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2559. struct ext4_li_request *elr;
  2560. elr = kzalloc(sizeof(*elr), GFP_KERNEL);
  2561. if (!elr)
  2562. return NULL;
  2563. elr->lr_super = sb;
  2564. elr->lr_sbi = sbi;
  2565. elr->lr_next_group = start;
  2566. /*
  2567. * Randomize first schedule time of the request to
  2568. * spread the inode table initialization requests
  2569. * better.
  2570. */
  2571. elr->lr_next_sched = jiffies + (prandom_u32() %
  2572. (EXT4_DEF_LI_MAX_START_DELAY * HZ));
  2573. return elr;
  2574. }
  2575. int ext4_register_li_request(struct super_block *sb,
  2576. ext4_group_t first_not_zeroed)
  2577. {
  2578. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2579. struct ext4_li_request *elr = NULL;
  2580. ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
  2581. int ret = 0;
  2582. mutex_lock(&ext4_li_mtx);
  2583. if (sbi->s_li_request != NULL) {
  2584. /*
  2585. * Reset timeout so it can be computed again, because
  2586. * s_li_wait_mult might have changed.
  2587. */
  2588. sbi->s_li_request->lr_timeout = 0;
  2589. goto out;
  2590. }
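/*
 * Nothing to do if every inode table is already zeroed, the fs is
 * read-only, or lazy inode table init is disabled by mount option.
 */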
  2591. if (first_not_zeroed == ngroups ||
  2592. (sb->s_flags & MS_RDONLY) ||
  2593. !test_opt(sb, INIT_INODE_TABLE))
  2594. goto out;
  2595. elr = ext4_li_request_new(sb, first_not_zeroed);
  2596. if (!elr) {
  2597. ret = -ENOMEM;
  2598. goto out;
  2599. }
  2600. if (NULL == ext4_li_info) {
  2601. ret = ext4_li_info_new();
  2602. if (ret)
  2603. goto out;
  2604. }
  2605. mutex_lock(&ext4_li_info->li_list_mtx);
  2606. list_add(&elr->lr_request, &ext4_li_info->li_request_list);
  2607. mutex_unlock(&ext4_li_info->li_list_mtx);
  2608. sbi->s_li_request = elr;
  2609. /*
2610. * Set elr to NULL here since it has been inserted into
2611. * the request_list; its removal and freeing are
2612. * handled by ext4_clear_request_list from now on.
  2613. */
  2614. elr = NULL;
  2615. if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
  2616. ret = ext4_run_lazyinit_thread();
  2617. if (ret)
  2618. goto out;
  2619. }
  2620. out:
  2621. mutex_unlock(&ext4_li_mtx);
  2622. if (ret)
  2623. kfree(elr);
  2624. return ret;
  2625. }
  2626. /*
  2627. * We do not need to lock anything since this is called on
  2628. * module unload.
  2629. */
  2630. static void ext4_destroy_lazyinit_thread(void)
  2631. {
  2632. /*
  2633. * If thread exited earlier
  2634. * there's nothing to be done.
  2635. */
  2636. if (!ext4_li_info || !ext4_lazyinit_task)
  2637. return;
  2638. kthread_stop(ext4_lazyinit_task);
  2639. }
  2640. static int set_journal_csum_feature_set(struct super_block *sb)
  2641. {
  2642. int ret = 1;
  2643. int compat, incompat;
  2644. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2645. if (ext4_has_metadata_csum(sb)) {
  2646. /* journal checksum v3 */
  2647. compat = 0;
  2648. incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
  2649. } else {
  2650. /* journal checksum v1 */
  2651. compat = JBD2_FEATURE_COMPAT_CHECKSUM;
  2652. incompat = 0;
  2653. }
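/* Clear any stale checksum feature bits before setting the ones we want. */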
  2654. jbd2_journal_clear_features(sbi->s_journal,
  2655. JBD2_FEATURE_COMPAT_CHECKSUM, 0,
  2656. JBD2_FEATURE_INCOMPAT_CSUM_V3 |
  2657. JBD2_FEATURE_INCOMPAT_CSUM_V2);
  2658. if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
  2659. ret = jbd2_journal_set_features(sbi->s_journal,
  2660. compat, 0,
  2661. JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
  2662. incompat);
  2663. } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
  2664. ret = jbd2_journal_set_features(sbi->s_journal,
  2665. compat, 0,
  2666. incompat);
  2667. jbd2_journal_clear_features(sbi->s_journal, 0, 0,
  2668. JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
  2669. } else {
  2670. jbd2_journal_clear_features(sbi->s_journal, 0, 0,
  2671. JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
  2672. }
  2673. return ret;
  2674. }
  2675. /*
  2676. * Note: calculating the overhead so we can be compatible with
  2677. * historical BSD practice is quite difficult in the face of
  2678. * clusters/bigalloc. This is because multiple metadata blocks from
2679. * different block groups can end up in the same allocation cluster.
  2680. * Calculating the exact overhead in the face of clustered allocation
  2681. * requires either O(all block bitmaps) in memory or O(number of block
2682. * groups**2) in time. We will still calculate the overhead for
2683. * older file systems --- and if we come across a bigalloc file
  2684. * system with zero in s_overhead_clusters the estimate will be close to
  2685. * correct especially for very large cluster sizes --- but for newer
  2686. * file systems, it's better to calculate this figure once at mkfs
  2687. * time, and store it in the superblock. If the superblock value is
  2688. * present (even for non-bigalloc file systems), we will use it.
  2689. */
  2690. static int count_overhead(struct super_block *sb, ext4_group_t grp,
  2691. char *buf)
  2692. {
  2693. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2694. struct ext4_group_desc *gdp;
  2695. ext4_fsblk_t first_block, last_block, b;
  2696. ext4_group_t i, ngroups = ext4_get_groups_count(sb);
  2697. int s, j, count = 0;
  2698. if (!ext4_has_feature_bigalloc(sb))
  2699. return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
  2700. sbi->s_itb_per_group + 2);
  2701. first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
  2702. (grp * EXT4_BLOCKS_PER_GROUP(sb));
  2703. last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
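/*
 * Walk all groups and mark, in buf, any block/inode bitmaps and
 * inode table blocks that happen to fall inside group grp.
 */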
  2704. for (i = 0; i < ngroups; i++) {
  2705. gdp = ext4_get_group_desc(sb, i, NULL);
  2706. b = ext4_block_bitmap(sb, gdp);
  2707. if (b >= first_block && b <= last_block) {
  2708. ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
  2709. count++;
  2710. }
  2711. b = ext4_inode_bitmap(sb, gdp);
  2712. if (b >= first_block && b <= last_block) {
  2713. ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
  2714. count++;
  2715. }
  2716. b = ext4_inode_table(sb, gdp);
  2717. if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
  2718. for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
  2719. int c = EXT4_B2C(sbi, b - first_block);
  2720. ext4_set_bit(c, buf);
  2721. count++;
  2722. }
  2723. if (i != grp)
  2724. continue;
  2725. s = 0;
  2726. if (ext4_bg_has_super(sb, grp)) {
  2727. ext4_set_bit(s++, buf);
  2728. count++;
  2729. }
  2730. for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
  2731. ext4_set_bit(EXT4_B2C(sbi, s++), buf);
  2732. count++;
  2733. }
  2734. }
  2735. if (!count)
  2736. return 0;
  2737. return EXT4_CLUSTERS_PER_GROUP(sb) -
  2738. ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
  2739. }
  2740. /*
  2741. * Compute the overhead and stash it in sbi->s_overhead
  2742. */
  2743. int ext4_calculate_overhead(struct super_block *sb)
  2744. {
  2745. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2746. struct ext4_super_block *es = sbi->s_es;
  2747. ext4_group_t i, ngroups = ext4_get_groups_count(sb);
  2748. ext4_fsblk_t overhead = 0;
  2749. char *buf = (char *) get_zeroed_page(GFP_NOFS);
  2750. if (!buf)
  2751. return -ENOMEM;
  2752. /*
  2753. * Compute the overhead (FS structures). This is constant
  2754. * for a given filesystem unless the number of block groups
2755. * changes, so we cache the previous value until it does.
  2756. */
  2757. /*
  2758. * All of the blocks before first_data_block are overhead
  2759. */
  2760. overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
  2761. /*
  2762. * Add the overhead found in each block group
  2763. */
  2764. for (i = 0; i < ngroups; i++) {
  2765. int blks;
  2766. blks = count_overhead(sb, i, buf);
  2767. overhead += blks;
  2768. if (blks)
  2769. memset(buf, 0, PAGE_SIZE);
  2770. cond_resched();
  2771. }
  2772. /* Add the internal journal blocks as well */
  2773. if (sbi->s_journal && !sbi->journal_bdev)
  2774. overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
  2775. sbi->s_overhead = overhead;
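/* Publish the updated s_overhead value before lockless readers use it. */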
  2776. smp_wmb();
  2777. free_page((unsigned long) buf);
  2778. return 0;
  2779. }
  2780. static void ext4_set_resv_clusters(struct super_block *sb)
  2781. {
  2782. ext4_fsblk_t resv_clusters;
  2783. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2784. /*
  2785. * There's no need to reserve anything when we aren't using extents.
  2786. * The space estimates are exact, there are no unwritten extents,
  2787. * hole punching doesn't need new metadata... This is needed especially
  2788. * to keep ext2/3 backward compatibility.
  2789. */
  2790. if (!ext4_has_feature_extents(sb))
  2791. return;
  2792. /*
  2793. * By default we reserve 2% or 4096 clusters, whichever is smaller.
2794. * This should cover the situations where we cannot afford to run
2795. * out of space, for example when punching a hole or converting
2796. * unwritten extents in the delalloc path. In most cases such an
2797. * allocation requires only 1 or 2 blocks; higher numbers are
2798. * very rare.
  2799. */
  2800. resv_clusters = (ext4_blocks_count(sbi->s_es) >>
  2801. sbi->s_cluster_bits);
  2802. do_div(resv_clusters, 50);
  2803. resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
  2804. atomic64_set(&sbi->s_resv_clusters, resv_clusters);
  2805. }
  2806. static int ext4_fill_super(struct super_block *sb, void *data, int silent)
  2807. {
  2808. char *orig_data = kstrdup(data, GFP_KERNEL);
  2809. struct buffer_head *bh;
  2810. struct ext4_super_block *es = NULL;
  2811. struct ext4_sb_info *sbi;
  2812. ext4_fsblk_t block;
  2813. ext4_fsblk_t sb_block = get_sb_block(&data);
  2814. ext4_fsblk_t logical_sb_block;
  2815. unsigned long offset = 0;
  2816. unsigned long journal_devnum = 0;
  2817. unsigned long def_mount_opts;
  2818. struct inode *root;
  2819. const char *descr;
  2820. int ret = -ENOMEM;
  2821. int blocksize, clustersize;
  2822. unsigned int db_count;
  2823. unsigned int i;
  2824. int needs_recovery, has_huge_files, has_bigalloc;
  2825. __u64 blocks_count;
  2826. int err = 0;
  2827. unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
  2828. ext4_group_t first_not_zeroed;
  2829. sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
  2830. if (!sbi)
  2831. goto out_free_orig;
  2832. sbi->s_blockgroup_lock =
  2833. kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
  2834. if (!sbi->s_blockgroup_lock) {
  2835. kfree(sbi);
  2836. goto out_free_orig;
  2837. }
  2838. sb->s_fs_info = sbi;
  2839. sbi->s_sb = sb;
  2840. sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
  2841. sbi->s_sb_block = sb_block;
  2842. if (sb->s_bdev->bd_part)
  2843. sbi->s_sectors_written_start =
  2844. part_stat_read(sb->s_bdev->bd_part, sectors[1]);
  2845. /* Cleanup superblock name */
  2846. strreplace(sb->s_id, '/', '!');
  2847. /* -EINVAL is default */
  2848. ret = -EINVAL;
  2849. blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
  2850. if (!blocksize) {
  2851. ext4_msg(sb, KERN_ERR, "unable to set blocksize");
  2852. goto out_fail;
  2853. }
  2854. /*
2855. * The ext4 superblock will not be buffer aligned for block sizes
2856. * other than 1kB. We need to calculate the offset from the buffer start.
  2857. */
  2858. if (blocksize != EXT4_MIN_BLOCK_SIZE) {
  2859. logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
  2860. offset = do_div(logical_sb_block, blocksize);
  2861. } else {
  2862. logical_sb_block = sb_block;
  2863. }
  2864. if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
  2865. ext4_msg(sb, KERN_ERR, "unable to read superblock");
  2866. goto out_fail;
  2867. }
  2868. /*
  2869. * Note: s_es must be initialized as soon as possible because
2870. * some ext4 macros depend on its value
  2871. */
  2872. es = (struct ext4_super_block *) (bh->b_data + offset);
  2873. sbi->s_es = es;
  2874. sb->s_magic = le16_to_cpu(es->s_magic);
  2875. if (sb->s_magic != EXT4_SUPER_MAGIC)
  2876. goto cantfind_ext4;
  2877. sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
  2878. /* Warn if metadata_csum and gdt_csum are both set. */
  2879. if (ext4_has_feature_metadata_csum(sb) &&
  2880. ext4_has_feature_gdt_csum(sb))
  2881. ext4_warning(sb, "metadata_csum and uninit_bg are "
  2882. "redundant flags; please run fsck.");
  2883. /* Check for a known checksum algorithm */
  2884. if (!ext4_verify_csum_type(sb, es)) {
  2885. ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
  2886. "unknown checksum algorithm.");
  2887. silent = 1;
  2888. goto cantfind_ext4;
  2889. }
  2890. /* Load the checksum driver */
  2891. if (ext4_has_feature_metadata_csum(sb)) {
  2892. sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
  2893. if (IS_ERR(sbi->s_chksum_driver)) {
  2894. ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
  2895. ret = PTR_ERR(sbi->s_chksum_driver);
  2896. sbi->s_chksum_driver = NULL;
  2897. goto failed_mount;
  2898. }
  2899. }
  2900. /* Check superblock checksum */
  2901. if (!ext4_superblock_csum_verify(sb, es)) {
  2902. ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
  2903. "invalid superblock checksum. Run e2fsck?");
  2904. silent = 1;
  2905. ret = -EFSBADCRC;
  2906. goto cantfind_ext4;
  2907. }
  2908. /* Precompute checksum seed for all metadata */
  2909. if (ext4_has_feature_csum_seed(sb))
  2910. sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
  2911. else if (ext4_has_metadata_csum(sb))
  2912. sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
  2913. sizeof(es->s_uuid));
  2914. /* Set defaults before we parse the mount options */
  2915. def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
  2916. set_opt(sb, INIT_INODE_TABLE);
  2917. if (def_mount_opts & EXT4_DEFM_DEBUG)
  2918. set_opt(sb, DEBUG);
  2919. if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
  2920. set_opt(sb, GRPID);
  2921. if (def_mount_opts & EXT4_DEFM_UID16)
  2922. set_opt(sb, NO_UID32);
  2923. /* xattr user namespace & acls are now defaulted on */
  2924. set_opt(sb, XATTR_USER);
  2925. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  2926. set_opt(sb, POSIX_ACL);
  2927. #endif
  2928. /* don't forget to enable journal_csum when metadata_csum is enabled. */
  2929. if (ext4_has_metadata_csum(sb))
  2930. set_opt(sb, JOURNAL_CHECKSUM);
  2931. if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
  2932. set_opt(sb, JOURNAL_DATA);
  2933. else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
  2934. set_opt(sb, ORDERED_DATA);
  2935. else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
  2936. set_opt(sb, WRITEBACK_DATA);
  2937. if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
  2938. set_opt(sb, ERRORS_PANIC);
  2939. else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
  2940. set_opt(sb, ERRORS_CONT);
  2941. else
  2942. set_opt(sb, ERRORS_RO);
  2943. /* block_validity enabled by default; disable with noblock_validity */
  2944. set_opt(sb, BLOCK_VALIDITY);
  2945. if (def_mount_opts & EXT4_DEFM_DISCARD)
  2946. set_opt(sb, DISCARD);
  2947. sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
  2948. sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
  2949. sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
  2950. sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
  2951. sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
  2952. if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
  2953. set_opt(sb, BARRIER);
  2954. /*
  2955. * enable delayed allocation by default
  2956. * Use -o nodelalloc to turn it off
  2957. */
  2958. if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
  2959. ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
  2960. set_opt(sb, DELALLOC);
  2961. /*
2962. * Set the default s_li_wait_mult for lazyinit, in case no mount
2963. * option is specified.
  2964. */
  2965. sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
  2966. if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
  2967. &journal_devnum, &journal_ioprio, 0)) {
  2968. ext4_msg(sb, KERN_WARNING,
  2969. "failed to parse options in superblock: %s",
  2970. sbi->s_es->s_mount_opts);
  2971. }
  2972. sbi->s_def_mount_opt = sbi->s_mount_opt;
  2973. if (!parse_options((char *) data, sb, &journal_devnum,
  2974. &journal_ioprio, 0))
  2975. goto failed_mount;
  2976. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
  2977. printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
  2978. "with data=journal disables delayed "
  2979. "allocation and O_DIRECT support!\n");
  2980. if (test_opt2(sb, EXPLICIT_DELALLOC)) {
  2981. ext4_msg(sb, KERN_ERR, "can't mount with "
  2982. "both data=journal and delalloc");
  2983. goto failed_mount;
  2984. }
  2985. if (test_opt(sb, DIOREAD_NOLOCK)) {
  2986. ext4_msg(sb, KERN_ERR, "can't mount with "
  2987. "both data=journal and dioread_nolock");
  2988. goto failed_mount;
  2989. }
  2990. if (test_opt(sb, DAX)) {
  2991. ext4_msg(sb, KERN_ERR, "can't mount with "
  2992. "both data=journal and dax");
  2993. goto failed_mount;
  2994. }
  2995. if (test_opt(sb, DELALLOC))
  2996. clear_opt(sb, DELALLOC);
  2997. } else {
  2998. sb->s_iflags |= SB_I_CGROUPWB;
  2999. }
  3000. sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
  3001. (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
  3002. if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
  3003. (ext4_has_compat_features(sb) ||
  3004. ext4_has_ro_compat_features(sb) ||
  3005. ext4_has_incompat_features(sb)))
  3006. ext4_msg(sb, KERN_WARNING,
  3007. "feature flags set on rev 0 fs, "
  3008. "running e2fsck is recommended");
  3009. if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
  3010. set_opt2(sb, HURD_COMPAT);
  3011. if (ext4_has_feature_64bit(sb)) {
  3012. ext4_msg(sb, KERN_ERR,
  3013. "The Hurd can't support 64-bit file systems");
  3014. goto failed_mount;
  3015. }
  3016. }
  3017. if (IS_EXT2_SB(sb)) {
  3018. if (ext2_feature_set_ok(sb))
  3019. ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
  3020. "using the ext4 subsystem");
  3021. else {
  3022. ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
  3023. "to feature incompatibilities");
  3024. goto failed_mount;
  3025. }
  3026. }
  3027. if (IS_EXT3_SB(sb)) {
  3028. if (ext3_feature_set_ok(sb))
  3029. ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
  3030. "using the ext4 subsystem");
  3031. else {
  3032. ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
  3033. "to feature incompatibilities");
  3034. goto failed_mount;
  3035. }
  3036. }
  3037. /*
  3038. * Check feature flags regardless of the revision level, since we
  3039. * previously didn't change the revision level when setting the flags,
  3040. * so there is a chance incompat flags are set on a rev 0 filesystem.
  3041. */
  3042. if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
  3043. goto failed_mount;
  3044. blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
  3045. if (blocksize < EXT4_MIN_BLOCK_SIZE ||
  3046. blocksize > EXT4_MAX_BLOCK_SIZE) {
  3047. ext4_msg(sb, KERN_ERR,
  3048. "Unsupported filesystem blocksize %d", blocksize);
  3049. goto failed_mount;
  3050. }
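/*
 * DAX is only supported when the block size equals the page size and
 * the block device implements direct_access.
 */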
  3051. if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
  3052. if (blocksize != PAGE_SIZE) {
  3053. ext4_msg(sb, KERN_ERR,
  3054. "error: unsupported blocksize for dax");
  3055. goto failed_mount;
  3056. }
  3057. if (!sb->s_bdev->bd_disk->fops->direct_access) {
  3058. ext4_msg(sb, KERN_ERR,
  3059. "error: device does not support dax");
  3060. goto failed_mount;
  3061. }
  3062. }
  3063. if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
  3064. ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
  3065. es->s_encryption_level);
  3066. goto failed_mount;
  3067. }
  3068. if (sb->s_blocksize != blocksize) {
  3069. /* Validate the filesystem blocksize */
  3070. if (!sb_set_blocksize(sb, blocksize)) {
  3071. ext4_msg(sb, KERN_ERR, "bad block size %d",
  3072. blocksize);
  3073. goto failed_mount;
  3074. }
  3075. brelse(bh);
  3076. logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
  3077. offset = do_div(logical_sb_block, blocksize);
  3078. bh = sb_bread_unmovable(sb, logical_sb_block);
  3079. if (!bh) {
  3080. ext4_msg(sb, KERN_ERR,
  3081. "Can't read superblock on 2nd try");
  3082. goto failed_mount;
  3083. }
  3084. es = (struct ext4_super_block *)(bh->b_data + offset);
  3085. sbi->s_es = es;
  3086. if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
  3087. ext4_msg(sb, KERN_ERR,
  3088. "Magic mismatch, very weird!");
  3089. goto failed_mount;
  3090. }
  3091. }
  3092. has_huge_files = ext4_has_feature_huge_file(sb);
  3093. sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
  3094. has_huge_files);
  3095. sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
  3096. if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
  3097. sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
  3098. sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
  3099. } else {
  3100. sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
  3101. sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
  3102. if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
  3103. (!is_power_of_2(sbi->s_inode_size)) ||
  3104. (sbi->s_inode_size > blocksize)) {
  3105. ext4_msg(sb, KERN_ERR,
  3106. "unsupported inode size: %d",
  3107. sbi->s_inode_size);
  3108. goto failed_mount;
  3109. }
  3110. if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
  3111. sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
  3112. }
  3113. sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
  3114. if (ext4_has_feature_64bit(sb)) {
  3115. if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
  3116. sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
  3117. !is_power_of_2(sbi->s_desc_size)) {
  3118. ext4_msg(sb, KERN_ERR,
  3119. "unsupported descriptor size %lu",
  3120. sbi->s_desc_size);
  3121. goto failed_mount;
  3122. }
  3123. } else
  3124. sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
  3125. sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
  3126. sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
  3127. if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
  3128. goto cantfind_ext4;
  3129. sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
  3130. if (sbi->s_inodes_per_block == 0)
  3131. goto cantfind_ext4;
  3132. sbi->s_itb_per_group = sbi->s_inodes_per_group /
  3133. sbi->s_inodes_per_block;
  3134. sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
  3135. sbi->s_sbh = bh;
  3136. sbi->s_mount_state = le16_to_cpu(es->s_state);
  3137. sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
  3138. sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
  3139. for (i = 0; i < 4; i++)
  3140. sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
  3141. sbi->s_def_hash_version = es->s_def_hash_version;
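/*
 * Directory hashes depend on the signedness of char on the system that
 * created the fs; record which variant to use and, on the first r/w
 * mount, stamp the corresponding flag into the superblock.
 */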
  3142. if (ext4_has_feature_dir_index(sb)) {
  3143. i = le32_to_cpu(es->s_flags);
  3144. if (i & EXT2_FLAGS_UNSIGNED_HASH)
  3145. sbi->s_hash_unsigned = 3;
  3146. else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
  3147. #ifdef __CHAR_UNSIGNED__
  3148. if (!(sb->s_flags & MS_RDONLY))
  3149. es->s_flags |=
  3150. cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
  3151. sbi->s_hash_unsigned = 3;
  3152. #else
  3153. if (!(sb->s_flags & MS_RDONLY))
  3154. es->s_flags |=
  3155. cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
  3156. #endif
  3157. }
  3158. }
  3159. /* Handle clustersize */
  3160. clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
  3161. has_bigalloc = ext4_has_feature_bigalloc(sb);
  3162. if (has_bigalloc) {
  3163. if (clustersize < blocksize) {
  3164. ext4_msg(sb, KERN_ERR,
  3165. "cluster size (%d) smaller than "
  3166. "block size (%d)", clustersize, blocksize);
  3167. goto failed_mount;
  3168. }
  3169. sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
  3170. le32_to_cpu(es->s_log_block_size);
  3171. sbi->s_clusters_per_group =
  3172. le32_to_cpu(es->s_clusters_per_group);
  3173. if (sbi->s_clusters_per_group > blocksize * 8) {
  3174. ext4_msg(sb, KERN_ERR,
  3175. "#clusters per group too big: %lu",
  3176. sbi->s_clusters_per_group);
  3177. goto failed_mount;
  3178. }
  3179. if (sbi->s_blocks_per_group !=
  3180. (sbi->s_clusters_per_group * (clustersize / blocksize))) {
  3181. ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
  3182. "clusters per group (%lu) inconsistent",
  3183. sbi->s_blocks_per_group,
  3184. sbi->s_clusters_per_group);
  3185. goto failed_mount;
  3186. }
  3187. } else {
  3188. if (clustersize != blocksize) {
  3189. ext4_warning(sb, "fragment/cluster size (%d) != "
  3190. "block size (%d)", clustersize,
  3191. blocksize);
  3192. clustersize = blocksize;
  3193. }
  3194. if (sbi->s_blocks_per_group > blocksize * 8) {
  3195. ext4_msg(sb, KERN_ERR,
  3196. "#blocks per group too big: %lu",
  3197. sbi->s_blocks_per_group);
  3198. goto failed_mount;
  3199. }
  3200. sbi->s_clusters_per_group = sbi->s_blocks_per_group;
  3201. sbi->s_cluster_bits = 0;
  3202. }
  3203. sbi->s_cluster_ratio = clustersize / blocksize;
  3204. if (sbi->s_inodes_per_group > blocksize * 8) {
  3205. ext4_msg(sb, KERN_ERR,
  3206. "#inodes per group too big: %lu",
  3207. sbi->s_inodes_per_group);
  3208. goto failed_mount;
  3209. }
  3210. /* Do we have standard group size of clustersize * 8 blocks ? */
  3211. if (sbi->s_blocks_per_group == clustersize << 3)
  3212. set_opt2(sb, STD_GROUP_SIZE);
  3213. /*
  3214. * Test whether we have more sectors than will fit in sector_t,
  3215. * and whether the max offset is addressable by the page cache.
  3216. */
  3217. err = generic_check_addressable(sb->s_blocksize_bits,
  3218. ext4_blocks_count(es));
  3219. if (err) {
  3220. ext4_msg(sb, KERN_ERR, "filesystem"
  3221. " too large to mount safely on this system");
  3222. if (sizeof(sector_t) < 8)
  3223. ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
  3224. goto failed_mount;
  3225. }
  3226. if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
  3227. goto cantfind_ext4;
  3228. /* check blocks count against device size */
  3229. blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
  3230. if (blocks_count && ext4_blocks_count(es) > blocks_count) {
  3231. ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
  3232. "exceeds size of device (%llu blocks)",
  3233. ext4_blocks_count(es), blocks_count);
  3234. goto failed_mount;
  3235. }
  3236. /*
  3237. * It makes no sense for the first data block to be beyond the end
  3238. * of the filesystem.
  3239. */
  3240. if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
  3241. ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
  3242. "block %u is beyond end of filesystem (%llu)",
  3243. le32_to_cpu(es->s_first_data_block),
  3244. ext4_blocks_count(es));
  3245. goto failed_mount;
  3246. }
  3247. blocks_count = (ext4_blocks_count(es) -
  3248. le32_to_cpu(es->s_first_data_block) +
  3249. EXT4_BLOCKS_PER_GROUP(sb) - 1);
  3250. do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
  3251. if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
  3252. ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
  3253. "(block count %llu, first data block %u, "
  3254. "blocks per group %lu)", sbi->s_groups_count,
  3255. ext4_blocks_count(es),
  3256. le32_to_cpu(es->s_first_data_block),
  3257. EXT4_BLOCKS_PER_GROUP(sb));
  3258. goto failed_mount;
  3259. }
  3260. sbi->s_groups_count = blocks_count;
  3261. sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
  3262. (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
  3263. db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
  3264. EXT4_DESC_PER_BLOCK(sb);
  3265. sbi->s_group_desc = ext4_kvmalloc(db_count *
  3266. sizeof(struct buffer_head *),
  3267. GFP_KERNEL);
  3268. if (sbi->s_group_desc == NULL) {
  3269. ext4_msg(sb, KERN_ERR, "not enough memory");
  3270. ret = -ENOMEM;
  3271. goto failed_mount;
  3272. }
  3273. bgl_lock_init(sbi->s_blockgroup_lock);
  3274. for (i = 0; i < db_count; i++) {
  3275. block = descriptor_loc(sb, logical_sb_block, i);
  3276. sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
  3277. if (!sbi->s_group_desc[i]) {
  3278. ext4_msg(sb, KERN_ERR,
  3279. "can't read group descriptor %d", i);
  3280. db_count = i;
  3281. goto failed_mount2;
  3282. }
  3283. }
  3284. if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
  3285. ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
  3286. ret = -EFSCORRUPTED;
  3287. goto failed_mount2;
  3288. }
  3289. sbi->s_gdb_count = db_count;
  3290. get_random_bytes(&sbi->s_next_generation, sizeof(u32));
  3291. spin_lock_init(&sbi->s_next_gen_lock);
  3292. setup_timer(&sbi->s_err_report, print_daily_error_info,
  3293. (unsigned long) sb);
  3294. /* Register extent status tree shrinker */
  3295. if (ext4_es_register_shrinker(sbi))
  3296. goto failed_mount3;
  3297. sbi->s_stripe = ext4_get_stripe_size(sbi);
  3298. sbi->s_extent_max_zeroout_kb = 32;
  3299. /*
  3300. * set up enough so that it can read an inode
  3301. */
  3302. sb->s_op = &ext4_sops;
  3303. sb->s_export_op = &ext4_export_ops;
  3304. sb->s_xattr = ext4_xattr_handlers;
  3305. #ifdef CONFIG_QUOTA
  3306. sb->dq_op = &ext4_quota_operations;
  3307. if (ext4_has_feature_quota(sb))
  3308. sb->s_qcop = &dquot_quotactl_sysfile_ops;
  3309. else
  3310. sb->s_qcop = &ext4_qctl_operations;
  3311. sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
  3312. #endif
  3313. memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
  3314. INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
  3315. mutex_init(&sbi->s_orphan_lock);
  3316. sb->s_root = NULL;
  3317. needs_recovery = (es->s_last_orphan != 0 ||
  3318. ext4_has_feature_journal_needs_recovery(sb));
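/*
 * With MMP enabled, verify the filesystem is not in use by another
 * node before allowing a read-write mount.
 */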
  3319. if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
  3320. if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
  3321. goto failed_mount3a;
  3322. /*
  3323. * The first inode we look at is the journal inode. Don't try
  3324. * root first: it may be modified in the journal!
  3325. */
  3326. if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
  3327. if (ext4_load_journal(sb, es, journal_devnum))
  3328. goto failed_mount3a;
  3329. } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
  3330. ext4_has_feature_journal_needs_recovery(sb)) {
  3331. ext4_msg(sb, KERN_ERR, "required journal recovery "
  3332. "suppressed and not mounted read-only");
  3333. goto failed_mount_wq;
  3334. } else {
  3335. /* Nojournal mode, all journal mount options are illegal */
  3336. if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
  3337. ext4_msg(sb, KERN_ERR, "can't mount with "
  3338. "journal_checksum, fs mounted w/o journal");
  3339. goto failed_mount_wq;
  3340. }
  3341. if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
  3342. ext4_msg(sb, KERN_ERR, "can't mount with "
  3343. "journal_async_commit, fs mounted w/o journal");
  3344. goto failed_mount_wq;
  3345. }
  3346. if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
  3347. ext4_msg(sb, KERN_ERR, "can't mount with "
  3348. "commit=%lu, fs mounted w/o journal",
  3349. sbi->s_commit_interval / HZ);
  3350. goto failed_mount_wq;
  3351. }
  3352. if (EXT4_MOUNT_DATA_FLAGS &
  3353. (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
  3354. ext4_msg(sb, KERN_ERR, "can't mount with "
  3355. "data=, fs mounted w/o journal");
  3356. goto failed_mount_wq;
  3357. }
3358. sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
  3359. clear_opt(sb, JOURNAL_CHECKSUM);
  3360. clear_opt(sb, DATA_FLAGS);
  3361. sbi->s_journal = NULL;
  3362. needs_recovery = 0;
  3363. goto no_journal;
  3364. }
  3365. if (ext4_has_feature_64bit(sb) &&
  3366. !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
  3367. JBD2_FEATURE_INCOMPAT_64BIT)) {
  3368. ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
  3369. goto failed_mount_wq;
  3370. }
  3371. if (!set_journal_csum_feature_set(sb)) {
  3372. ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
  3373. "feature set");
  3374. goto failed_mount_wq;
  3375. }
  3376. /* We have now updated the journal if required, so we can
  3377. * validate the data journaling mode. */
  3378. switch (test_opt(sb, DATA_FLAGS)) {
  3379. case 0:
  3380. /* No mode set, assume a default based on the journal
  3381. * capabilities: ORDERED_DATA if the journal can
  3382. * cope, else JOURNAL_DATA
  3383. */
  3384. if (jbd2_journal_check_available_features
  3385. (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
  3386. set_opt(sb, ORDERED_DATA);
  3387. else
  3388. set_opt(sb, JOURNAL_DATA);
  3389. break;
  3390. case EXT4_MOUNT_ORDERED_DATA:
  3391. case EXT4_MOUNT_WRITEBACK_DATA:
  3392. if (!jbd2_journal_check_available_features
  3393. (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
  3394. ext4_msg(sb, KERN_ERR, "Journal does not support "
  3395. "requested data journaling mode");
  3396. goto failed_mount_wq;
  3397. }
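/* fall through to the default case */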
  3398. default:
  3399. break;
  3400. }
  3401. set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
  3402. sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
  3403. no_journal:
  3404. if (ext4_mballoc_ready) {
  3405. sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
  3406. if (!sbi->s_mb_cache) {
  3407. ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
  3408. goto failed_mount_wq;
  3409. }
  3410. }
  3411. if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
  3412. (blocksize != PAGE_CACHE_SIZE)) {
  3413. ext4_msg(sb, KERN_ERR,
  3414. "Unsupported blocksize for fs encryption");
  3415. goto failed_mount_wq;
  3416. }
  3417. if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
  3418. !ext4_has_feature_encrypt(sb)) {
  3419. ext4_set_feature_encrypt(sb);
  3420. ext4_commit_super(sb, 1);
  3421. }
  3422. /*
  3423. * Get the # of file system overhead blocks from the
  3424. * superblock if present.
  3425. */
  3426. if (es->s_overhead_clusters)
  3427. sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
  3428. else {
  3429. err = ext4_calculate_overhead(sb);
  3430. if (err)
  3431. goto failed_mount_wq;
  3432. }
  3433. /*
3434. * The maximum number of concurrent work items can be high and
  3435. * concurrency isn't really necessary. Limit it to 1.
  3436. */
  3437. EXT4_SB(sb)->rsv_conversion_wq =
  3438. alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
  3439. if (!EXT4_SB(sb)->rsv_conversion_wq) {
  3440. printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
  3441. ret = -ENOMEM;
  3442. goto failed_mount4;
  3443. }
  3444. /*
  3445. * The jbd2_journal_load will have done any necessary log recovery,
  3446. * so we can safely mount the rest of the filesystem now.
  3447. */
  3448. root = ext4_iget(sb, EXT4_ROOT_INO);
  3449. if (IS_ERR(root)) {
  3450. ext4_msg(sb, KERN_ERR, "get root inode failed");
  3451. ret = PTR_ERR(root);
  3452. root = NULL;
  3453. goto failed_mount4;
  3454. }
  3455. if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
  3456. ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
  3457. iput(root);
  3458. goto failed_mount4;
  3459. }
  3460. sb->s_root = d_make_root(root);
  3461. if (!sb->s_root) {
  3462. ext4_msg(sb, KERN_ERR, "get root dentry failed");
  3463. ret = -ENOMEM;
  3464. goto failed_mount4;
  3465. }
  3466. if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
  3467. sb->s_flags |= MS_RDONLY;
  3468. /* determine the minimum size of new large inodes, if present */
  3469. if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
  3470. sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
  3471. EXT4_GOOD_OLD_INODE_SIZE;
  3472. if (ext4_has_feature_extra_isize(sb)) {
  3473. if (sbi->s_want_extra_isize <
  3474. le16_to_cpu(es->s_want_extra_isize))
  3475. sbi->s_want_extra_isize =
  3476. le16_to_cpu(es->s_want_extra_isize);
  3477. if (sbi->s_want_extra_isize <
  3478. le16_to_cpu(es->s_min_extra_isize))
  3479. sbi->s_want_extra_isize =
  3480. le16_to_cpu(es->s_min_extra_isize);
  3481. }
  3482. }
  3483. /* Check if enough inode space is available */
  3484. if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
  3485. sbi->s_inode_size) {
  3486. sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
  3487. EXT4_GOOD_OLD_INODE_SIZE;
3488. ext4_msg(sb, KERN_INFO, "required extra inode space not "
3489. "available");
  3490. }
  3491. ext4_set_resv_clusters(sb);
  3492. err = ext4_setup_system_zone(sb);
  3493. if (err) {
  3494. ext4_msg(sb, KERN_ERR, "failed to initialize system "
  3495. "zone (%d)", err);
  3496. goto failed_mount4a;
  3497. }
  3498. ext4_ext_init(sb);
  3499. err = ext4_mb_init(sb);
  3500. if (err) {
  3501. ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
  3502. err);
  3503. goto failed_mount5;
  3504. }
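/*
 * Seed the in-memory percpu counters (free clusters, free inodes,
 * directories, dirty clusters) from the on-disk state.
 */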
  3505. block = ext4_count_free_clusters(sb);
  3506. ext4_free_blocks_count_set(sbi->s_es,
  3507. EXT4_C2B(sbi, block));
  3508. err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
  3509. GFP_KERNEL);
  3510. if (!err) {
  3511. unsigned long freei = ext4_count_free_inodes(sb);
  3512. sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
  3513. err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
  3514. GFP_KERNEL);
  3515. }
  3516. if (!err)
  3517. err = percpu_counter_init(&sbi->s_dirs_counter,
  3518. ext4_count_dirs(sb), GFP_KERNEL);
  3519. if (!err)
  3520. err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
  3521. GFP_KERNEL);
  3522. if (err) {
  3523. ext4_msg(sb, KERN_ERR, "insufficient memory");
  3524. goto failed_mount6;
  3525. }
  3526. if (ext4_has_feature_flex_bg(sb))
  3527. if (!ext4_fill_flex_info(sb)) {
  3528. ext4_msg(sb, KERN_ERR,
  3529. "unable to initialize "
  3530. "flex_bg meta info!");
  3531. goto failed_mount6;
  3532. }
  3533. err = ext4_register_li_request(sb, first_not_zeroed);
  3534. if (err)
  3535. goto failed_mount6;
  3536. err = ext4_register_sysfs(sb);
  3537. if (err)
  3538. goto failed_mount7;
  3539. #ifdef CONFIG_QUOTA
  3540. /* Enable quota usage during mount. */
  3541. if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) {
  3542. err = ext4_enable_quotas(sb);
  3543. if (err)
  3544. goto failed_mount8;
  3545. }
  3546. #endif /* CONFIG_QUOTA */
  3547. EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
  3548. ext4_orphan_cleanup(sb, es);
  3549. EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
  3550. if (needs_recovery) {
  3551. ext4_msg(sb, KERN_INFO, "recovery complete");
  3552. ext4_mark_recovery_complete(sb, es);
  3553. }
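/*
 * descr completes the "mounted filesystem with%s" message below:
 * either " <mode> data mode" or "out journal" (i.e. "without journal").
 */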
  3554. if (EXT4_SB(sb)->s_journal) {
  3555. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
  3556. descr = " journalled data mode";
  3557. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
  3558. descr = " ordered data mode";
  3559. else
  3560. descr = " writeback data mode";
  3561. } else
  3562. descr = "out journal";
  3563. if (test_opt(sb, DISCARD)) {
  3564. struct request_queue *q = bdev_get_queue(sb->s_bdev);
  3565. if (!blk_queue_discard(q))
  3566. ext4_msg(sb, KERN_WARNING,
  3567. "mounting with \"discard\" option, but "
  3568. "the device does not support discard");
  3569. }
  3570. if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
  3571. ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
  3572. "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
  3573. *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
  3574. if (es->s_error_count)
  3575. mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
  3576. /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
  3577. ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
  3578. ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
  3579. ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
  3580. kfree(orig_data);
  3581. return 0;
  3582. cantfind_ext4:
  3583. if (!silent)
  3584. ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
  3585. goto failed_mount;
  3586. #ifdef CONFIG_QUOTA
  3587. failed_mount8:
  3588. ext4_unregister_sysfs(sb);
  3589. #endif
  3590. failed_mount7:
  3591. ext4_unregister_li_request(sb);
  3592. failed_mount6:
  3593. ext4_mb_release(sb);
  3594. if (sbi->s_flex_groups)
  3595. kvfree(sbi->s_flex_groups);
  3596. percpu_counter_destroy(&sbi->s_freeclusters_counter);
  3597. percpu_counter_destroy(&sbi->s_freeinodes_counter);
  3598. percpu_counter_destroy(&sbi->s_dirs_counter);
  3599. percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
  3600. failed_mount5:
  3601. ext4_ext_release(sb);
  3602. ext4_release_system_zone(sb);
  3603. failed_mount4a:
  3604. dput(sb->s_root);
  3605. sb->s_root = NULL;
  3606. failed_mount4:
  3607. ext4_msg(sb, KERN_ERR, "mount failed");
  3608. if (EXT4_SB(sb)->rsv_conversion_wq)
  3609. destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
  3610. failed_mount_wq:
  3611. if (sbi->s_journal) {
  3612. jbd2_journal_destroy(sbi->s_journal);
  3613. sbi->s_journal = NULL;
  3614. }
  3615. failed_mount3a:
  3616. ext4_es_unregister_shrinker(sbi);
  3617. failed_mount3:
  3618. del_timer_sync(&sbi->s_err_report);
  3619. if (sbi->s_mmp_tsk)
  3620. kthread_stop(sbi->s_mmp_tsk);
  3621. failed_mount2:
  3622. for (i = 0; i < db_count; i++)
  3623. brelse(sbi->s_group_desc[i]);
  3624. kvfree(sbi->s_group_desc);
  3625. failed_mount:
  3626. if (sbi->s_chksum_driver)
  3627. crypto_free_shash(sbi->s_chksum_driver);
  3628. #ifdef CONFIG_QUOTA
  3629. for (i = 0; i < EXT4_MAXQUOTAS; i++)
  3630. kfree(sbi->s_qf_names[i]);
  3631. #endif
  3632. ext4_blkdev_remove(sbi);
  3633. brelse(bh);
  3634. out_fail:
  3635. sb->s_fs_info = NULL;
  3636. kfree(sbi->s_blockgroup_lock);
  3637. kfree(sbi);
  3638. out_free_orig:
  3639. kfree(orig_data);
  3640. return err ? err : ret;
  3641. }
  3642. /*
3643. * Set up any per-fs journal parameters now. We'll do this both on
  3644. * initial mount, once the journal has been initialised but before we've
  3645. * done any recovery; and again on any subsequent remount.
  3646. */
  3647. static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
  3648. {
  3649. struct ext4_sb_info *sbi = EXT4_SB(sb);
  3650. journal->j_commit_interval = sbi->s_commit_interval;
  3651. journal->j_min_batch_time = sbi->s_min_batch_time;
  3652. journal->j_max_batch_time = sbi->s_max_batch_time;
  3653. write_lock(&journal->j_state_lock);
  3654. if (test_opt(sb, BARRIER))
  3655. journal->j_flags |= JBD2_BARRIER;
  3656. else
  3657. journal->j_flags &= ~JBD2_BARRIER;
  3658. if (test_opt(sb, DATA_ERR_ABORT))
  3659. journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
  3660. else
  3661. journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
  3662. write_unlock(&journal->j_state_lock);
  3663. }
  3664. static journal_t *ext4_get_journal(struct super_block *sb,
  3665. unsigned int journal_inum)
  3666. {
  3667. struct inode *journal_inode;
  3668. journal_t *journal;
  3669. BUG_ON(!ext4_has_feature_journal(sb));
  3670. /* First, test for the existence of a valid inode on disk. Bad
  3671. * things happen if we iget() an unused inode, as the subsequent
  3672. * iput() will try to delete it. */
  3673. journal_inode = ext4_iget(sb, journal_inum);
  3674. if (IS_ERR(journal_inode)) {
  3675. ext4_msg(sb, KERN_ERR, "no journal found");
  3676. return NULL;
  3677. }
  3678. if (!journal_inode->i_nlink) {
  3679. make_bad_inode(journal_inode);
  3680. iput(journal_inode);
  3681. ext4_msg(sb, KERN_ERR, "journal inode is deleted");
  3682. return NULL;
  3683. }
  3684. jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
  3685. journal_inode, journal_inode->i_size);
  3686. if (!S_ISREG(journal_inode->i_mode)) {
  3687. ext4_msg(sb, KERN_ERR, "invalid journal inode");
  3688. iput(journal_inode);
  3689. return NULL;
  3690. }
  3691. journal = jbd2_journal_init_inode(journal_inode);
  3692. if (!journal) {
  3693. ext4_msg(sb, KERN_ERR, "Could not load journal inode");
  3694. iput(journal_inode);
  3695. return NULL;
  3696. }
  3697. journal->j_private = sb;
  3698. ext4_init_journal_params(sb, journal);
  3699. return journal;
  3700. }
  3701. static journal_t *ext4_get_dev_journal(struct super_block *sb,
  3702. dev_t j_dev)
  3703. {
  3704. struct buffer_head *bh;
  3705. journal_t *journal;
  3706. ext4_fsblk_t start;
  3707. ext4_fsblk_t len;
  3708. int hblock, blocksize;
  3709. ext4_fsblk_t sb_block;
  3710. unsigned long offset;
  3711. struct ext4_super_block *es;
  3712. struct block_device *bdev;
  3713. BUG_ON(!ext4_has_feature_journal(sb));
  3714. bdev = ext4_blkdev_get(j_dev, sb);
  3715. if (bdev == NULL)
  3716. return NULL;
  3717. blocksize = sb->s_blocksize;
  3718. hblock = bdev_logical_block_size(bdev);
  3719. if (blocksize < hblock) {
  3720. ext4_msg(sb, KERN_ERR,
  3721. "blocksize too small for journal device");
  3722. goto out_bdev;
  3723. }
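/*
 * The external journal's ext4 superblock lives at byte offset 1024
 * (EXT4_MIN_BLOCK_SIZE); convert that to a block number and offset in
 * the journal device's block size.
 */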
  3724. sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
  3725. offset = EXT4_MIN_BLOCK_SIZE % blocksize;
  3726. set_blocksize(bdev, blocksize);
  3727. if (!(bh = __bread(bdev, sb_block, blocksize))) {
  3728. ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
  3729. "external journal");
  3730. goto out_bdev;
  3731. }
  3732. es = (struct ext4_super_block *) (bh->b_data + offset);
  3733. if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
  3734. !(le32_to_cpu(es->s_feature_incompat) &
  3735. EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
  3736. ext4_msg(sb, KERN_ERR, "external journal has "
  3737. "bad superblock");
  3738. brelse(bh);
  3739. goto out_bdev;
  3740. }
  3741. if ((le32_to_cpu(es->s_feature_ro_compat) &
  3742. EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
  3743. es->s_checksum != ext4_superblock_csum(sb, es)) {
  3744. ext4_msg(sb, KERN_ERR, "external journal has "
  3745. "corrupt superblock");
  3746. brelse(bh);
  3747. goto out_bdev;
  3748. }
  3749. if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
  3750. ext4_msg(sb, KERN_ERR, "journal UUID does not match");
  3751. brelse(bh);
  3752. goto out_bdev;
  3753. }
  3754. len = ext4_blocks_count(es);
  3755. start = sb_block + 1;
  3756. brelse(bh); /* we're done with the superblock */
  3757. journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
  3758. start, len, blocksize);
  3759. if (!journal) {
  3760. ext4_msg(sb, KERN_ERR, "failed to create device journal");
  3761. goto out_bdev;
  3762. }
  3763. journal->j_private = sb;
  3764. ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
  3765. wait_on_buffer(journal->j_sb_buffer);
  3766. if (!buffer_uptodate(journal->j_sb_buffer)) {
  3767. ext4_msg(sb, KERN_ERR, "I/O error on journal device");
  3768. goto out_journal;
  3769. }
  3770. if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
  3771. ext4_msg(sb, KERN_ERR, "External journal has more than one "
  3772. "user (unsupported) - %d",
  3773. be32_to_cpu(journal->j_superblock->s_nr_users));
  3774. goto out_journal;
  3775. }
  3776. EXT4_SB(sb)->journal_bdev = bdev;
  3777. ext4_init_journal_params(sb, journal);
  3778. return journal;
  3779. out_journal:
  3780. jbd2_journal_destroy(journal);
  3781. out_bdev:
  3782. ext4_blkdev_put(bdev);
  3783. return NULL;
  3784. }
  3785. static int ext4_load_journal(struct super_block *sb,
  3786. struct ext4_super_block *es,
  3787. unsigned long journal_devnum)
  3788. {
  3789. journal_t *journal;
  3790. unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
  3791. dev_t journal_dev;
  3792. int err = 0;
  3793. int really_read_only;
  3794. BUG_ON(!ext4_has_feature_journal(sb));
  3795. if (journal_devnum &&
  3796. journal_devnum != le32_to_cpu(es->s_journal_dev)) {
  3797. ext4_msg(sb, KERN_INFO, "external journal device major/minor "
  3798. "numbers have changed");
  3799. journal_dev = new_decode_dev(journal_devnum);
  3800. } else
  3801. journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
  3802. really_read_only = bdev_read_only(sb->s_bdev);
  3803. /*
  3804. * Are we loading a blank journal or performing recovery after a
  3805. * crash? For recovery, we need to check in advance whether we
  3806. * can get read-write access to the device.
  3807. */
  3808. if (ext4_has_feature_journal_needs_recovery(sb)) {
  3809. if (sb->s_flags & MS_RDONLY) {
  3810. ext4_msg(sb, KERN_INFO, "INFO: recovery "
  3811. "required on readonly filesystem");
  3812. if (really_read_only) {
  3813. ext4_msg(sb, KERN_ERR, "write access "
  3814. "unavailable, cannot proceed");
  3815. return -EROFS;
  3816. }
  3817. ext4_msg(sb, KERN_INFO, "write access will "
  3818. "be enabled during recovery");
  3819. }
  3820. }
  3821. if (journal_inum && journal_dev) {
  3822. ext4_msg(sb, KERN_ERR, "filesystem has both journal "
  3823. "and inode journals!");
  3824. return -EINVAL;
  3825. }
  3826. if (journal_inum) {
  3827. if (!(journal = ext4_get_journal(sb, journal_inum)))
  3828. return -EINVAL;
  3829. } else {
  3830. if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
  3831. return -EINVAL;
  3832. }
  3833. if (!(journal->j_flags & JBD2_BARRIER))
  3834. ext4_msg(sb, KERN_INFO, "barriers disabled");
  3835. if (!ext4_has_feature_journal_needs_recovery(sb))
  3836. err = jbd2_journal_wipe(journal, !really_read_only);
  3837. if (!err) {
  3838. char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
  3839. if (save)
  3840. memcpy(save, ((char *) es) +
  3841. EXT4_S_ERR_START, EXT4_S_ERR_LEN);
  3842. err = jbd2_journal_load(journal);
  3843. if (save)
  3844. memcpy(((char *) es) + EXT4_S_ERR_START,
  3845. save, EXT4_S_ERR_LEN);
  3846. kfree(save);
  3847. }
  3848. if (err) {
  3849. ext4_msg(sb, KERN_ERR, "error loading journal");
  3850. jbd2_journal_destroy(journal);
  3851. return err;
  3852. }
  3853. EXT4_SB(sb)->s_journal = journal;
  3854. ext4_clear_journal_err(sb, es);
  3855. if (!really_read_only && journal_devnum &&
  3856. journal_devnum != le32_to_cpu(es->s_journal_dev)) {
  3857. es->s_journal_dev = cpu_to_le32(journal_devnum);
  3858. /* Make sure we flush the recovery flag to disk. */
  3859. ext4_commit_super(sb, 1);
  3860. }
  3861. return 0;
  3862. }
  3863. static int ext4_commit_super(struct super_block *sb, int sync)
  3864. {
  3865. struct ext4_super_block *es = EXT4_SB(sb)->s_es;
  3866. struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
  3867. int error = 0;
  3868. if (!sbh || block_device_ejected(sb))
  3869. return error;
  3870. if (buffer_write_io_error(sbh)) {
  3871. /*
  3872. * Oh, dear. A previous attempt to write the
  3873. * superblock failed. This could happen because the
  3874. * USB device was yanked out. Or it could happen to
  3875. * be a transient write error and maybe the block will
3876. * be remapped. Nothing we can do but retry the
  3877. * write and hope for the best.
  3878. */
  3879. ext4_msg(sb, KERN_ERR, "previous I/O error to "
  3880. "superblock detected");
  3881. clear_buffer_write_io_error(sbh);
  3882. set_buffer_uptodate(sbh);
  3883. }
  3884. /*
  3885. * If the file system is mounted read-only, don't update the
  3886. * superblock write time. This avoids updating the superblock
  3887. * write time when we are mounting the root file system
  3888. * read/only but we need to replay the journal; at that point,
  3889. * for people who are east of GMT and who make their clock
  3890. * tick in localtime for Windows bug-for-bug compatibility,
  3891. * the clock is set in the future, and this will cause e2fsck
  3892. * to complain and force a full file system check.
  3893. */
  3894. if (!(sb->s_flags & MS_RDONLY))
  3895. es->s_wtime = cpu_to_le32(get_seconds());
  3896. if (sb->s_bdev->bd_part)
  3897. es->s_kbytes_written =
  3898. cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
  3899. ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
  3900. EXT4_SB(sb)->s_sectors_written_start) >> 1));
  3901. else
  3902. es->s_kbytes_written =
  3903. cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
  3904. if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
  3905. ext4_free_blocks_count_set(es,
  3906. EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
  3907. &EXT4_SB(sb)->s_freeclusters_counter)));
  3908. if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
  3909. es->s_free_inodes_count =
  3910. cpu_to_le32(percpu_counter_sum_positive(
  3911. &EXT4_SB(sb)->s_freeinodes_counter));
  3912. BUFFER_TRACE(sbh, "marking dirty");
  3913. ext4_superblock_csum_set(sb);
  3914. mark_buffer_dirty(sbh);
  3915. if (sync) {
  3916. error = __sync_dirty_buffer(sbh,
  3917. test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
  3918. if (error)
  3919. return error;
  3920. error = buffer_write_io_error(sbh);
  3921. if (error) {
  3922. ext4_msg(sb, KERN_ERR, "I/O error while writing "
  3923. "superblock");
  3924. clear_buffer_write_io_error(sbh);
  3925. set_buffer_uptodate(sbh);
  3926. }
  3927. }
  3928. return error;
  3929. }
/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es)
{
        journal_t *journal = EXT4_SB(sb)->s_journal;

        if (!ext4_has_feature_journal(sb)) {
                BUG_ON(journal != NULL);
                return;
        }
        jbd2_journal_lock_updates(journal);
        if (jbd2_journal_flush(journal) < 0)
                goto out;

        if (ext4_has_feature_journal_needs_recovery(sb) &&
            sb->s_flags & MS_RDONLY) {
                ext4_clear_feature_journal_needs_recovery(sb);
                ext4_commit_super(sb, 1);
        }

out:
        jbd2_journal_unlock_updates(journal);
}
/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
                                   struct ext4_super_block *es)
{
        journal_t *journal;
        int j_errno;
        const char *errstr;

        BUG_ON(!ext4_has_feature_journal(sb));

        journal = EXT4_SB(sb)->s_journal;

        /*
         * Now check for any error status which may have been recorded in the
         * journal by a prior ext4_error() or ext4_abort()
         */
        j_errno = jbd2_journal_errno(journal);
        if (j_errno) {
                char nbuf[16];

                errstr = ext4_decode_error(sb, j_errno, nbuf);
                ext4_warning(sb, "Filesystem error recorded "
                             "from previous mount: %s", errstr);
                ext4_warning(sb, "Marking fs in need of filesystem check.");

                EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
                es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
                ext4_commit_super(sb, 1);

                jbd2_journal_clear_err(journal);
                jbd2_journal_update_sb_errno(journal);
        }
}
/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
        journal_t *journal;

        if (sb->s_flags & MS_RDONLY)
                return 0;

        journal = EXT4_SB(sb)->s_journal;
        return ext4_journal_force_commit(journal);
}
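
/*
 * Flush filesystem state for sync(2)/syncfs(2): drain pending reserved-space
 * conversions, write back dquots, kick (and optionally wait for) a journal
 * commit, and issue a cache flush if the commit will not do it for us.
 */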
static int ext4_sync_fs(struct super_block *sb, int wait)
{
        int ret = 0;
        tid_t target;
        bool needs_barrier = false;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        trace_ext4_sync_fs(sb, wait);
        flush_workqueue(sbi->rsv_conversion_wq);
        /*
         * Writeback quota in non-journalled quota case - journalled quota has
         * no dirty dquots
         */
        dquot_writeback_dquots(sb, -1);
        /*
         * Data writeback is possible w/o journal transaction, so barrier must
         * be sent at the end of the function. But we can skip it if
         * transaction_commit will do it for us.
         */
        if (sbi->s_journal) {
                target = jbd2_get_latest_transaction(sbi->s_journal);
                if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
                    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
                        needs_barrier = true;

                if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
                        if (wait)
                                ret = jbd2_log_wait_commit(sbi->s_journal,
                                                           target);
                }
        } else if (wait && test_opt(sb, BARRIER))
                needs_barrier = true;
        if (needs_barrier) {
                int err;
                err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
                if (!ret)
                        ret = err;
        }

        return ret;
}
/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem to a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
        int error = 0;
        journal_t *journal;

        if (sb->s_flags & MS_RDONLY)
                return 0;

        journal = EXT4_SB(sb)->s_journal;

        if (journal) {
                /* Now we set up the journal barrier. */
                jbd2_journal_lock_updates(journal);

                /*
                 * Don't clear the needs_recovery flag if we failed to
                 * flush the journal.
                 */
                error = jbd2_journal_flush(journal);
                if (error < 0)
                        goto out;

                /* Journal blocked and flushed, clear needs_recovery flag. */
                ext4_clear_feature_journal_needs_recovery(sb);
        }

        error = ext4_commit_super(sb, 1);
out:
        if (journal)
                /* we rely on upper layer to stop further updates */
                jbd2_journal_unlock_updates(journal);
        return error;
}
/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
        if (sb->s_flags & MS_RDONLY)
                return 0;

        if (EXT4_SB(sb)->s_journal) {
                /* Reset the needs_recovery flag before the fs is unlocked. */
                ext4_set_feature_journal_needs_recovery(sb);
        }

        ext4_commit_super(sb, 1);
        return 0;
}
/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
        unsigned long s_mount_opt;
        unsigned long s_mount_opt2;
        kuid_t s_resuid;
        kgid_t s_resgid;
        unsigned long s_commit_interval;
        u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
        int s_jquota_fmt;
        char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};
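
/*
 * Re-parse mount options and apply them to an already-mounted filesystem,
 * handling read-only <-> read-write transitions; on failure the previously
 * active options are restored.
 */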
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
        struct ext4_super_block *es;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned long old_sb_flags;
        struct ext4_mount_options old_opts;
        int enable_quota = 0;
        ext4_group_t g;
        unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
        int err = 0;
#ifdef CONFIG_QUOTA
        int i, j;
#endif
        char *orig_data = kstrdup(data, GFP_KERNEL);

        /* Store the original options */
        old_sb_flags = sb->s_flags;
        old_opts.s_mount_opt = sbi->s_mount_opt;
        old_opts.s_mount_opt2 = sbi->s_mount_opt2;
        old_opts.s_resuid = sbi->s_resuid;
        old_opts.s_resgid = sbi->s_resgid;
        old_opts.s_commit_interval = sbi->s_commit_interval;
        old_opts.s_min_batch_time = sbi->s_min_batch_time;
        old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
        old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                if (sbi->s_qf_names[i]) {
                        old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
                                                         GFP_KERNEL);
                        if (!old_opts.s_qf_names[i]) {
                                for (j = 0; j < i; j++)
                                        kfree(old_opts.s_qf_names[j]);
                                kfree(orig_data);
                                return -ENOMEM;
                        }
                } else
                        old_opts.s_qf_names[i] = NULL;
#endif
        if (sbi->s_journal && sbi->s_journal->j_task->io_context)
                journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

        if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
                err = -EINVAL;
                goto restore_opts;
        }

        if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
            test_opt(sb, JOURNAL_CHECKSUM)) {
                ext4_msg(sb, KERN_ERR, "changing journal_checksum "
                         "during remount not supported; ignoring");
                sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
        }

        if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
                if (test_opt2(sb, EXPLICIT_DELALLOC)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "both data=journal and delalloc");
                        err = -EINVAL;
                        goto restore_opts;
                }
                if (test_opt(sb, DIOREAD_NOLOCK)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "both data=journal and dioread_nolock");
                        err = -EINVAL;
                        goto restore_opts;
                }
                if (test_opt(sb, DAX)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "both data=journal and dax");
                        err = -EINVAL;
                        goto restore_opts;
                }
        }

        if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
                ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
                        "dax flag with busy inodes while remounting");
                sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
        }

        if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
                ext4_abort(sb, "Abort forced by user");

        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

        es = sbi->s_es;

        if (sbi->s_journal) {
                ext4_init_journal_params(sb, sbi->s_journal);
                set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
        }

        if (*flags & MS_LAZYTIME)
                sb->s_flags |= MS_LAZYTIME;

        if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
                if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
                        err = -EROFS;
                        goto restore_opts;
                }

                if (*flags & MS_RDONLY) {
                        err = sync_filesystem(sb);
                        if (err < 0)
                                goto restore_opts;
                        err = dquot_suspend(sb, -1);
                        if (err < 0)
                                goto restore_opts;

                        /*
                         * First of all, the unconditional stuff we have to do
                         * to disable replay of the journal when we next remount
                         */
                        sb->s_flags |= MS_RDONLY;

                        /*
                         * OK, test if we are remounting a valid rw partition
                         * readonly, and if so set the rdonly flag and then
                         * mark the partition as valid again.
                         */
                        if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
                            (sbi->s_mount_state & EXT4_VALID_FS))
                                es->s_state = cpu_to_le16(sbi->s_mount_state);

                        if (sbi->s_journal)
                                ext4_mark_recovery_complete(sb, es);
                } else {
                        /* Make sure we can mount this feature set readwrite */
                        if (ext4_has_feature_readonly(sb) ||
                            !ext4_feature_set_ok(sb, 0)) {
                                err = -EROFS;
                                goto restore_opts;
                        }
                        /*
                         * Make sure the group descriptor checksums
                         * are sane.  If they aren't, refuse to remount r/w.
                         */
                        for (g = 0; g < sbi->s_groups_count; g++) {
                                struct ext4_group_desc *gdp =
                                        ext4_get_group_desc(sb, g, NULL);

                                if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
                                        ext4_msg(sb, KERN_ERR,
        "ext4_remount: Checksum for group %u failed (%u!=%u)",
                g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
                                               le16_to_cpu(gdp->bg_checksum));
                                        err = -EFSBADCRC;
                                        goto restore_opts;
                                }
                        }

                        /*
                         * If we have an unprocessed orphan list hanging
                         * around from a previously readonly bdev mount,
                         * require a full umount/remount for now.
                         */
                        if (es->s_last_orphan) {
                                ext4_msg(sb, KERN_WARNING, "Couldn't "
                                       "remount RDWR because of unprocessed "
                                       "orphan inode list.  Please "
                                       "umount/remount instead");
                                err = -EINVAL;
                                goto restore_opts;
                        }

                        /*
                         * Mounting a RDONLY partition read-write, so reread
                         * and store the current valid flag.  (It may have
                         * been changed by e2fsck since we originally mounted
                         * the partition.)
                         */
                        if (sbi->s_journal)
                                ext4_clear_journal_err(sb, es);
                        sbi->s_mount_state = le16_to_cpu(es->s_state);
                        if (!ext4_setup_super(sb, es, 0))
                                sb->s_flags &= ~MS_RDONLY;
                        if (ext4_has_feature_mmp(sb))
                                if (ext4_multi_mount_protect(sb,
                                                le64_to_cpu(es->s_mmp_block))) {
                                        err = -EROFS;
                                        goto restore_opts;
                                }
                        enable_quota = 1;
                }
        }

        /*
         * Reinitialize lazy itable initialization thread based on
         * current settings
         */
        if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
                ext4_unregister_li_request(sb);
        else {
                ext4_group_t first_not_zeroed;
                first_not_zeroed = ext4_has_uninit_itable(sb);
                ext4_register_li_request(sb, first_not_zeroed);
        }

        ext4_setup_system_zone(sb);
        if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
                ext4_commit_super(sb, 1);

#ifdef CONFIG_QUOTA
        /* Release old quota file names */
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                kfree(old_opts.s_qf_names[i]);
        if (enable_quota) {
                if (sb_any_quota_suspended(sb))
                        dquot_resume(sb, -1);
                else if (ext4_has_feature_quota(sb)) {
                        err = ext4_enable_quotas(sb);
                        if (err)
                                goto restore_opts;
                }
        }
#endif

        *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
        ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
        kfree(orig_data);
        return 0;

restore_opts:
        sb->s_flags = old_sb_flags;
        sbi->s_mount_opt = old_opts.s_mount_opt;
        sbi->s_mount_opt2 = old_opts.s_mount_opt2;
        sbi->s_resuid = old_opts.s_resuid;
        sbi->s_resgid = old_opts.s_resgid;
        sbi->s_commit_interval = old_opts.s_commit_interval;
        sbi->s_min_batch_time = old_opts.s_min_batch_time;
        sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
        sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
        for (i = 0; i < EXT4_MAXQUOTAS; i++) {
                kfree(sbi->s_qf_names[i]);
                sbi->s_qf_names[i] = old_opts.s_qf_names[i];
        }
#endif
        kfree(orig_data);
        return err;
}
#ifdef CONFIG_QUOTA
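/*
 * Clamp the block and inode counts reported by statfs(2) to the project
 * quota limits of @projid, so that a directory tree under project quota
 * reports its quota rather than the whole filesystem.
 */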
static int ext4_statfs_project(struct super_block *sb,
                               kprojid_t projid, struct kstatfs *buf)
{
        struct kqid qid;
        struct dquot *dquot;
        u64 limit;
        u64 curblock;

        qid = make_kqid_projid(projid);
        dquot = dqget(sb, qid);
        if (IS_ERR(dquot))
                return PTR_ERR(dquot);
        spin_lock(&dq_data_lock);

        limit = (dquot->dq_dqb.dqb_bsoftlimit ?
                 dquot->dq_dqb.dqb_bsoftlimit :
                 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
        if (limit && buf->f_blocks > limit) {
                curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
                buf->f_blocks = limit;
                buf->f_bfree = buf->f_bavail =
                        (buf->f_blocks > curblock) ?
                         (buf->f_blocks - curblock) : 0;
        }

        limit = dquot->dq_dqb.dqb_isoftlimit ?
                dquot->dq_dqb.dqb_isoftlimit :
                dquot->dq_dqb.dqb_ihardlimit;
        if (limit && buf->f_files > limit) {
                buf->f_files = limit;
                buf->f_ffree =
                        (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
                         (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
        }

        spin_unlock(&dq_data_lock);
        dqput(dquot);
        return 0;
}
#endif
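
/*
 * Report filesystem statistics for statfs(2).  Free counts come from the
 * per-cpu counters, reserved and overhead blocks are hidden from f_bavail,
 * and project quota limits are applied when the inode carries the
 * PROJINHERIT flag.
 */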
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        ext4_fsblk_t overhead = 0, resv_blocks;
        u64 fsid;
        s64 bfree;
        resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

        if (!test_opt(sb, MINIX_DF))
                overhead = sbi->s_overhead;

        buf->f_type = EXT4_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
        bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
                percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
        /* prevent underflow in case little free space is available */
        buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
        buf->f_bavail = buf->f_bfree -
                        (ext4_r_blocks_count(es) + resv_blocks);
        if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
                buf->f_bavail = 0;
        buf->f_files = le32_to_cpu(es->s_inodes_count);
        buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
        buf->f_namelen = EXT4_NAME_LEN;
        fsid = le64_to_cpup((void *)es->s_uuid) ^
               le64_to_cpup((void *)es->s_uuid + sizeof(u64));
        buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
        buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
        if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
            sb_has_quota_limits_enabled(sb, PRJQUOTA))
                ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
        return 0;
}
/* Helper function for writing quotas on sync - we need to start transaction
 * before quota file is locked for write. Otherwise there are possible
 * deadlocks:
 * Process 1                         Process 2
 * ext4_create()                     quota_sync()
 *   jbd2_journal_start()              write_dquot()
 *   dquot_initialize()                  down(dqio_mutex)
 *     down(dqio_mutex)                    jbd2_journal_start()
 *
 */

#ifdef CONFIG_QUOTA

static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
        return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}
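
/* Commit a dquot's changes to the quota file under a journal handle. */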
static int ext4_write_dquot(struct dquot *dquot)
{
        int ret, err;
        handle_t *handle;
        struct inode *inode;

        inode = dquot_to_inode(dquot);
        handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
                                    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        ret = dquot_commit(dquot);
        err = ext4_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;
}
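
/* Read in (and possibly allocate space for) a dquot under a journal handle. */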
static int ext4_acquire_dquot(struct dquot *dquot)
{
        int ret, err;
        handle_t *handle;

        handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
                                    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        ret = dquot_acquire(dquot);
        err = ext4_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;
}
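
/* Release an unused dquot's on-disk state under a journal handle. */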
static int ext4_release_dquot(struct dquot *dquot)
{
        int ret, err;
        handle_t *handle;

        handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
                                    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
        if (IS_ERR(handle)) {
                /* Release dquot anyway to avoid endless cycle in dqput() */
                dquot_release(dquot);
                return PTR_ERR(handle);
        }
        ret = dquot_release(dquot);
        err = ext4_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;
}
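
/*
 * With journaled quota (the quota feature or journaled quota files), dirty
 * dquots are written out immediately; otherwise they are only marked dirty
 * for later writeback.
 */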
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
        struct super_block *sb = dquot->dq_sb;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        /* Are we journaling quotas? */
        if (ext4_has_feature_quota(sb) ||
            sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
                dquot_mark_dquot_dirty(dquot);
                return ext4_write_dquot(dquot);
        } else {
                return dquot_mark_dquot_dirty(dquot);
        }
}
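
/* Write quota format information for @type under a small journal handle. */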
static int ext4_write_info(struct super_block *sb, int type)
{
        int ret, err;
        handle_t *handle;

        /* Data block + inode block */
        handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        ret = dquot_commit_info(sb, type);
        err = ext4_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;
}
/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
        return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
                                        EXT4_SB(sb)->s_jquota_fmt, type);
}
/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
                         struct path *path)
{
        int err;

        if (!test_opt(sb, QUOTA))
                return -EINVAL;

        /* Quotafile not on the same filesystem? */
        if (path->dentry->d_sb != sb)
                return -EXDEV;
        /* Journaling quota? */
        if (EXT4_SB(sb)->s_qf_names[type]) {
                /* Quotafile not in fs root? */
                if (path->dentry->d_parent != sb->s_root)
                        ext4_msg(sb, KERN_WARNING,
                                "Quota file not on filesystem root. "
                                "Journaled quota will not work");
        }

        /*
         * When we journal data on quota file, we have to flush journal to see
         * all updates to the file when we bypass pagecache...
         */
        if (EXT4_SB(sb)->s_journal &&
            ext4_should_journal_data(d_inode(path->dentry))) {
                /*
                 * We don't need to lock updates but journal_flush() could
                 * otherwise be livelocked...
                 */
                jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
                err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
                jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
                if (err)
                        return err;
        }

        return dquot_quota_on(sb, type, format_id, path);
}
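
/*
 * Enable one quota type backed by the hidden quota inode recorded in the
 * superblock (used when the quota feature is enabled).
 */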
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
                             unsigned int flags)
{
        int err;
        struct inode *qf_inode;
        unsigned long qf_inums[EXT4_MAXQUOTAS] = {
                le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
                le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
                le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
        };

        BUG_ON(!ext4_has_feature_quota(sb));

        if (!qf_inums[type])
                return -EPERM;

        qf_inode = ext4_iget(sb, qf_inums[type]);
        if (IS_ERR(qf_inode)) {
                ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
                return PTR_ERR(qf_inode);
        }

        /* Don't account quota for quota files to avoid recursion */
        qf_inode->i_flags |= S_NOQUOTA;
        err = dquot_enable(qf_inode, type, format_id, flags);
        iput(qf_inode);

        return err;
}
/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
        int type, err = 0;
        unsigned long qf_inums[EXT4_MAXQUOTAS] = {
                le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
                le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
                le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
        };

        sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
        for (type = 0; type < EXT4_MAXQUOTAS; type++) {
                if (qf_inums[type]) {
                        err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
                                                DQUOT_USAGE_ENABLED);
                        if (err) {
                                ext4_warning(sb,
                                        "Failed to enable quota tracking "
                                        "(type=%d, err=%d). Please run "
                                        "e2fsck to fix.", type, err);
                                return err;
                        }
                }
        }
        return 0;
}
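
/*
 * Turn quotas off: flush delayed allocations so the quota file is fully
 * written, update its timestamps, then let the generic code do the rest.
 */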
static int ext4_quota_off(struct super_block *sb, int type)
{
        struct inode *inode = sb_dqopt(sb)->files[type];
        handle_t *handle;

        /* Force all delayed allocation blocks to be allocated.
         * Caller already holds s_umount sem */
        if (test_opt(sb, DELALLOC))
                sync_filesystem(sb);

        if (!inode)
                goto out;

        /* Update modification times of quota files when userspace can
         * start looking at them */
        handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
        if (IS_ERR(handle))
                goto out;
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        ext4_mark_inode_dirty(handle, inode);
        ext4_journal_stop(handle);

out:
        return dquot_quota_off(sb, type);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
                               size_t len, loff_t off)
{
        struct inode *inode = sb_dqopt(sb)->files[type];
        ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
        int offset = off & (sb->s_blocksize - 1);
        int tocopy;
        size_t toread;
        struct buffer_head *bh;
        loff_t i_size = i_size_read(inode);

        if (off > i_size)
                return 0;
        if (off+len > i_size)
                len = i_size-off;
        toread = len;
        while (toread > 0) {
                tocopy = sb->s_blocksize - offset < toread ?
                                sb->s_blocksize - offset : toread;
                bh = ext4_bread(NULL, inode, blk, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                if (!bh)        /* A hole? */
                        memset(data, 0, tocopy);
                else
                        memcpy(data, bh->b_data+offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
                                const char *data, size_t len, loff_t off)
{
        struct inode *inode = sb_dqopt(sb)->files[type];
        ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
        int err, offset = off & (sb->s_blocksize - 1);
        int retries = 0;
        struct buffer_head *bh;
        handle_t *handle = journal_current_handle();

        if (EXT4_SB(sb)->s_journal && !handle) {
                ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
                        " cancelled because transaction is not started",
                        (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        /*
         * Since we account only one data block in transaction credits,
         * it is impossible to cross a block boundary.
         */
        if (sb->s_blocksize - offset < len) {
                ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
                        " cancelled because not block aligned",
                        (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }

        do {
                bh = ext4_bread(handle, inode, blk,
                                EXT4_GET_BLOCKS_CREATE |
                                EXT4_GET_BLOCKS_METADATA_NOFAIL);
        } while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
                 ext4_should_retry_alloc(inode->i_sb, &retries));
        if (IS_ERR(bh))
                return PTR_ERR(bh);
        if (!bh)
                goto out;
        BUFFER_TRACE(bh, "get write access");
        err = ext4_journal_get_write_access(handle, bh);
        if (err) {
                brelse(bh);
                return err;
        }
        lock_buffer(bh);
        memcpy(bh->b_data+offset, data, len);
        flush_dcache_page(bh->b_page);
        unlock_buffer(bh);
        err = ext4_handle_dirty_metadata(handle, NULL, bh);
        brelse(bh);
out:
        if (inode->i_size < off + len) {
                i_size_write(inode, off + len);
                EXT4_I(inode)->i_disksize = inode->i_size;
                ext4_mark_inode_dirty(handle, inode);
        }
        return len;
}
#endif
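
/* Standard mount_bdev() wrapper; ext4_fill_super() does the real work. */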
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
                       const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
        int err = register_filesystem(&ext2_fs_type);
        if (err)
                printk(KERN_WARNING
                       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
        unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
        if (ext4_has_unknown_ext2_incompat_features(sb))
                return 0;
        if (sb->s_flags & MS_RDONLY)
                return 1;
        if (ext4_has_unknown_ext2_ro_compat_features(sb))
                return 0;
        return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif
static inline void register_as_ext3(void)
{
        int err = register_filesystem(&ext3_fs_type);
        if (err)
                printk(KERN_WARNING
                       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
        unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
        if (ext4_has_unknown_ext3_incompat_features(sb))
                return 0;
        if (!ext4_has_feature_journal(sb))
                return 0;
        if (sb->s_flags & MS_RDONLY)
                return 1;
        if (ext4_has_unknown_ext3_ro_compat_features(sb))
                return 0;
        return 1;
}
static struct file_system_type ext4_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ext4",
        .mount          = ext4_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
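
/*
 * Module initialization: set up internal caches and sysfs support, then
 * register the filesystem (also as ext2/ext3 where configured), unwinding
 * everything on failure.
 */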
static int __init ext4_init_fs(void)
{
        int i, err;

        ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
        ext4_li_info = NULL;
        mutex_init(&ext4_li_mtx);

        /* Build-time check for flags consistency */
        ext4_check_flag_values();

        for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
                mutex_init(&ext4__aio_mutex[i]);
                init_waitqueue_head(&ext4__ioend_wq[i]);
        }

        err = ext4_init_es();
        if (err)
                return err;

        err = ext4_init_pageio();
        if (err)
                goto out5;

        err = ext4_init_system_zone();
        if (err)
                goto out4;

        err = ext4_init_sysfs();
        if (err)
                goto out3;

        err = ext4_init_mballoc();
        if (err)
                goto out2;
        else
                ext4_mballoc_ready = 1;
        err = init_inodecache();
        if (err)
                goto out1;
        register_as_ext3();
        register_as_ext2();
        err = register_filesystem(&ext4_fs_type);
        if (err)
                goto out;

        return 0;
out:
        unregister_as_ext2();
        unregister_as_ext3();
        destroy_inodecache();
out1:
        ext4_mballoc_ready = 0;
        ext4_exit_mballoc();
out2:
        ext4_exit_sysfs();
out3:
        ext4_exit_system_zone();
out4:
        ext4_exit_pageio();
out5:
        ext4_exit_es();
        return err;
}
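
/*
 * Module teardown: unregister the filesystem types and release the state
 * set up in ext4_init_fs().
 */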
static void __exit ext4_exit_fs(void)
{
        ext4_exit_crypto();
        ext4_destroy_lazyinit_thread();
        unregister_as_ext2();
        unregister_as_ext3();
        unregister_filesystem(&ext4_fs_type);
        destroy_inodecache();
        ext4_exit_mballoc();
        ext4_exit_sysfs();
        ext4_exit_system_zone();
        ext4_exit_pageio();
        ext4_exit_es();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)