/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   transaction start -> i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) -> mmap_sem
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) ->
 *   transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif

static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
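
/*
 * Descriptive note: only crc32c is implemented for metadata checksums, so
 * the superblock is accepted either when the metadata_csum feature is not
 * enabled at all or when the recorded checksum type is EXT4_CRC32C_CHKSUM.
 */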
static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
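
/*
 * Descriptive note: the superblock checksum covers everything up to, but
 * not including, the s_checksum field itself (offsetof(..., s_checksum)
 * bytes), seeded with ~0 via ext4_chksum().
 */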
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}
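
/*
 * Descriptive note: allocation helpers used for large metadata arrays.
 * A physically contiguous kmalloc()/kzalloc() is tried first (with
 * allocation-failure warnings suppressed) and vmalloc() is used as a
 * fallback when memory is too fragmented for the requested size.
 */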
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}
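
/*
 * Descriptive note: block group descriptor accessors and setters.  On
 * filesystems whose descriptors are at least EXT4_MIN_DESC_SIZE_64BIT
 * bytes, block numbers and counts are split across the _lo and _hi
 * fields; the helpers below reassemble or store them.  As an illustrative
 * example, a bitmap block at 0x100000000 would be stored as
 * bg_block_bitmap_lo = 0x0 and bg_block_bitmap_hi = 0x1.
 */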
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
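
/*
 * Descriptive note: record an error in the on-disk superblock.  The first
 * error ever seen (function, line, inode, block, time) is remembered
 * alongside the most recent one, s_error_count is bumped, and the daily
 * error-report timer is armed.  Nothing is written if the backing device
 * is read-only.
 */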
static void __save_error_info(struct super_block *sb, const char *func,
			      unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}
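
/*
 * Descriptive note: called by jbd2 when a transaction commits.  Frees data
 * blocks released in that transaction and then runs every callback queued
 * on the transaction's t_private_list, dropping s_md_lock around each
 * callback invocation.
 */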
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry *jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (sb_rdonly(sb))
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= MS_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
		      sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if (sb_rdonly(sb) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= MS_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
		 __bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;

	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif
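
/*
 * Descriptive note: tear down a mounted filesystem.  This stops the
 * lazy-init request and quota state, flushes and destroys the reserved
 * conversion workqueue, shuts down the journal, writes out a final
 * superblock, and frees the in-memory structures (group descriptors,
 * flex groups, per-cpu counters, xattr caches and the sb_info itself).
 */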
  753. static void ext4_put_super(struct super_block *sb)
  754. {
  755. struct ext4_sb_info *sbi = EXT4_SB(sb);
  756. struct ext4_super_block *es = sbi->s_es;
  757. int aborted = 0;
  758. int i, err;
  759. ext4_unregister_li_request(sb);
  760. ext4_quota_off_umount(sb);
  761. flush_workqueue(sbi->rsv_conversion_wq);
  762. destroy_workqueue(sbi->rsv_conversion_wq);
  763. if (sbi->s_journal) {
  764. aborted = is_journal_aborted(sbi->s_journal);
  765. err = jbd2_journal_destroy(sbi->s_journal);
  766. sbi->s_journal = NULL;
  767. if ((err < 0) && !aborted)
  768. ext4_abort(sb, "Couldn't clean up the journal");
  769. }
  770. ext4_unregister_sysfs(sb);
  771. ext4_es_unregister_shrinker(sbi);
  772. del_timer_sync(&sbi->s_err_report);
  773. ext4_release_system_zone(sb);
  774. ext4_mb_release(sb);
  775. ext4_ext_release(sb);
  776. if (!sb_rdonly(sb) && !aborted) {
  777. ext4_clear_feature_journal_needs_recovery(sb);
  778. es->s_state = cpu_to_le16(sbi->s_mount_state);
  779. }
  780. if (!sb_rdonly(sb))
  781. ext4_commit_super(sb, 1);
  782. for (i = 0; i < sbi->s_gdb_count; i++)
  783. brelse(sbi->s_group_desc[i]);
  784. kvfree(sbi->s_group_desc);
  785. kvfree(sbi->s_flex_groups);
  786. percpu_counter_destroy(&sbi->s_freeclusters_counter);
  787. percpu_counter_destroy(&sbi->s_freeinodes_counter);
  788. percpu_counter_destroy(&sbi->s_dirs_counter);
  789. percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
  790. percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
  791. #ifdef CONFIG_QUOTA
  792. for (i = 0; i < EXT4_MAXQUOTAS; i++)
  793. kfree(sbi->s_qf_names[i]);
  794. #endif
  795. /* Debugging code just in case the in-memory inode orphan list
  796. * isn't empty. The on-disk one can be non-empty if we've
  797. * detected an error and taken the fs readonly, but the
  798. * in-memory list had better be clean by this point. */
  799. if (!list_empty(&sbi->s_orphan))
  800. dump_orphan_list(sb, sbi);
  801. J_ASSERT(list_empty(&sbi->s_orphan));
  802. sync_blockdev(sb->s_bdev);
  803. invalidate_bdev(sb->s_bdev);
  804. if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
  805. /*
  806. * Invalidate the journal device's buffers. We don't want them
  807. * floating about in memory - the physical journal device may
  808. * hotswapped, and it breaks the `ro-after' testing code.
  809. */
  810. sync_blockdev(sbi->journal_bdev);
  811. invalidate_bdev(sbi->journal_bdev);
  812. ext4_blkdev_remove(sbi);
  813. }
  814. if (sbi->s_ea_inode_cache) {
  815. ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
  816. sbi->s_ea_inode_cache = NULL;
  817. }
  818. if (sbi->s_ea_block_cache) {
  819. ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
  820. sbi->s_ea_block_cache = NULL;
  821. }
  822. if (sbi->s_mmp_tsk)
  823. kthread_stop(sbi->s_mmp_tsk);
  824. brelse(sbi->s_sbh);
  825. sb->s_fs_info = NULL;
  826. /*
  827. * Now that we are completely done shutting down the
  828. * superblock, we need to actually destroy the kobject.
  829. */
  830. kobject_put(&sbi->s_kobj);
  831. wait_for_completion(&sbi->s_kobj_unregister);
  832. if (sbi->s_chksum_driver)
  833. crypto_free_shash(sbi->s_chksum_driver);
  834. kfree(sbi->s_blockgroup_lock);
  835. fs_put_dax(sbi->s_daxdev);
  836. kfree(sbi);
  837. }
  838. static struct kmem_cache *ext4_inode_cachep;
  839. /*
  840. * Called inside transaction, so use GFP_NOFS
  841. */
  842. static struct inode *ext4_alloc_inode(struct super_block *sb)
  843. {
  844. struct ext4_inode_info *ei;
  845. ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
  846. if (!ei)
  847. return NULL;
  848. ei->vfs_inode.i_version = 1;
  849. spin_lock_init(&ei->i_raw_lock);
  850. INIT_LIST_HEAD(&ei->i_prealloc_list);
  851. spin_lock_init(&ei->i_prealloc_lock);
  852. ext4_es_init_tree(&ei->i_es_tree);
  853. rwlock_init(&ei->i_es_lock);
  854. INIT_LIST_HEAD(&ei->i_es_list);
  855. ei->i_es_all_nr = 0;
  856. ei->i_es_shk_nr = 0;
  857. ei->i_es_shrink_lblk = 0;
  858. ei->i_reserved_data_blocks = 0;
  859. ei->i_da_metadata_calc_len = 0;
  860. ei->i_da_metadata_calc_last_lblock = 0;
  861. spin_lock_init(&(ei->i_block_reservation_lock));
  862. #ifdef CONFIG_QUOTA
  863. ei->i_reserved_quota = 0;
  864. memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
  865. #endif
  866. ei->jinode = NULL;
  867. INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
  868. spin_lock_init(&ei->i_completed_io_lock);
  869. ei->i_sync_tid = 0;
  870. ei->i_datasync_tid = 0;
  871. atomic_set(&ei->i_unwritten, 0);
  872. INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
  873. return &ei->vfs_inode;
  874. }
  875. static int ext4_drop_inode(struct inode *inode)
  876. {
  877. int drop = generic_drop_inode(inode);
  878. trace_ext4_drop_inode(inode, drop);
  879. return drop;
  880. }
  881. static void ext4_i_callback(struct rcu_head *head)
  882. {
  883. struct inode *inode = container_of(head, struct inode, i_rcu);
  884. kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
  885. }
  886. static void ext4_destroy_inode(struct inode *inode)
  887. {
  888. if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
  889. ext4_msg(inode->i_sb, KERN_ERR,
  890. "Inode %lu (%p): orphan list check failed!",
  891. inode->i_ino, EXT4_I(inode));
  892. print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
  893. EXT4_I(inode), sizeof(struct ext4_inode_info),
  894. true);
  895. dump_stack();
  896. }
  897. call_rcu(&inode->i_rcu, ext4_i_callback);
  898. }
  899. static void init_once(void *foo)
  900. {
  901. struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
  902. INIT_LIST_HEAD(&ei->i_orphan);
  903. init_rwsem(&ei->xattr_sem);
  904. init_rwsem(&ei->i_data_sem);
  905. init_rwsem(&ei->i_mmap_sem);
  906. inode_init_once(&ei->vfs_inode);
  907. }
  908. static int __init init_inodecache(void)
  909. {
  910. ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
  911. sizeof(struct ext4_inode_info),
  912. 0, (SLAB_RECLAIM_ACCOUNT|
  913. SLAB_MEM_SPREAD|SLAB_ACCOUNT),
  914. init_once);
  915. if (ext4_inode_cachep == NULL)
  916. return -ENOMEM;
  917. return 0;
  918. }
  919. static void destroy_inodecache(void)
  920. {
  921. /*
  922. * Make sure all delayed rcu free inodes are flushed before we
923. * destroy the cache.
  924. */
  925. rcu_barrier();
  926. kmem_cache_destroy(ext4_inode_cachep);
  927. }
  928. void ext4_clear_inode(struct inode *inode)
  929. {
  930. invalidate_inode_buffers(inode);
  931. clear_inode(inode);
  932. dquot_drop(inode);
  933. ext4_discard_preallocations(inode);
  934. ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
  935. if (EXT4_I(inode)->jinode) {
  936. jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
  937. EXT4_I(inode)->jinode);
  938. jbd2_free_inode(EXT4_I(inode)->jinode);
  939. EXT4_I(inode)->jinode = NULL;
  940. }
  941. #ifdef CONFIG_EXT4_FS_ENCRYPTION
  942. fscrypt_put_encryption_info(inode, NULL);
  943. #endif
  944. }
  945. static struct inode *ext4_nfs_get_inode(struct super_block *sb,
  946. u64 ino, u32 generation)
  947. {
  948. struct inode *inode;
  949. if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
  950. return ERR_PTR(-ESTALE);
  951. if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
  952. return ERR_PTR(-ESTALE);
  953. /* iget isn't really right if the inode is currently unallocated!!
  954. *
  955. * ext4_read_inode will return a bad_inode if the inode had been
  956. * deleted, so we should be safe.
  957. *
  958. * Currently we don't know the generation for parent directory, so
  959. * a generation of 0 means "accept any"
  960. */
  961. inode = ext4_iget_normal(sb, ino);
  962. if (IS_ERR(inode))
  963. return ERR_CAST(inode);
  964. if (generation && inode->i_generation != generation) {
  965. iput(inode);
  966. return ERR_PTR(-ESTALE);
  967. }
  968. return inode;
  969. }
  970. static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
  971. int fh_len, int fh_type)
  972. {
  973. return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
  974. ext4_nfs_get_inode);
  975. }
  976. static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
  977. int fh_len, int fh_type)
  978. {
  979. return generic_fh_to_parent(sb, fid, fh_len, fh_type,
  980. ext4_nfs_get_inode);
  981. }
  982. /*
  983. * Try to release metadata pages (indirect blocks, directories) which are
  984. * mapped via the block device. Since these pages could have journal heads
  985. * which would prevent try_to_free_buffers() from freeing them, we must use
  986. * jbd2 layer's try_to_free_buffers() function to release them.
  987. */
  988. static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
  989. gfp_t wait)
  990. {
  991. journal_t *journal = EXT4_SB(sb)->s_journal;
  992. WARN_ON(PageChecked(page));
  993. if (!page_has_buffers(page))
  994. return 0;
  995. if (journal)
  996. return jbd2_journal_try_to_free_buffers(journal, page,
  997. wait & ~__GFP_DIRECT_RECLAIM);
  998. return try_to_free_buffers(page);
  999. }
  1000. #ifdef CONFIG_EXT4_FS_ENCRYPTION
  1001. static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
  1002. {
  1003. return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
  1004. EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
  1005. }
  1006. static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
  1007. void *fs_data)
  1008. {
  1009. handle_t *handle = fs_data;
  1010. int res, res2, credits, retries = 0;
  1011. /*
  1012. * Encrypting the root directory is not allowed because e2fsck expects
  1013. * lost+found to exist and be unencrypted, and encrypting the root
  1014. * directory would imply encrypting the lost+found directory as well as
  1015. * the filename "lost+found" itself.
  1016. */
  1017. if (inode->i_ino == EXT4_ROOT_INO)
  1018. return -EPERM;
  1019. res = ext4_convert_inline_data(inode);
  1020. if (res)
  1021. return res;
  1022. /*
  1023. * If a journal handle was specified, then the encryption context is
  1024. * being set on a new inode via inheritance and is part of a larger
  1025. * transaction to create the inode. Otherwise the encryption context is
  1026. * being set on an existing inode in its own transaction. Only in the
  1027. * latter case should the "retry on ENOSPC" logic be used.
  1028. */
  1029. if (handle) {
  1030. res = ext4_xattr_set_handle(handle, inode,
  1031. EXT4_XATTR_INDEX_ENCRYPTION,
  1032. EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
  1033. ctx, len, 0);
  1034. if (!res) {
  1035. ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
  1036. ext4_clear_inode_state(inode,
  1037. EXT4_STATE_MAY_INLINE_DATA);
  1038. /*
  1039. * Update inode->i_flags - e.g. S_DAX may get disabled
  1040. */
  1041. ext4_set_inode_flags(inode);
  1042. }
  1043. return res;
  1044. }
  1045. res = dquot_initialize(inode);
  1046. if (res)
  1047. return res;
  1048. retry:
  1049. res = ext4_xattr_set_credits(inode, len, false /* is_create */,
  1050. &credits);
  1051. if (res)
  1052. return res;
  1053. handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
  1054. if (IS_ERR(handle))
  1055. return PTR_ERR(handle);
  1056. res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
  1057. EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
  1058. ctx, len, 0);
  1059. if (!res) {
  1060. ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
  1061. /* Update inode->i_flags - e.g. S_DAX may get disabled */
  1062. ext4_set_inode_flags(inode);
  1063. res = ext4_mark_inode_dirty(handle, inode);
  1064. if (res)
  1065. EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
  1066. }
  1067. res2 = ext4_journal_stop(handle);
  1068. if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
  1069. goto retry;
  1070. if (!res)
  1071. res = res2;
  1072. return res;
  1073. }
  1074. static bool ext4_dummy_context(struct inode *inode)
  1075. {
  1076. return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
  1077. }
  1078. static unsigned ext4_max_namelen(struct inode *inode)
  1079. {
  1080. return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
  1081. EXT4_NAME_LEN;
  1082. }
  1083. static const struct fscrypt_operations ext4_cryptops = {
  1084. .key_prefix = "ext4:",
  1085. .get_context = ext4_get_context,
  1086. .set_context = ext4_set_context,
  1087. .dummy_context = ext4_dummy_context,
  1088. .is_encrypted = ext4_encrypted_inode,
  1089. .empty_dir = ext4_empty_dir,
  1090. .max_namelen = ext4_max_namelen,
  1091. };
  1092. #else
  1093. static const struct fscrypt_operations ext4_cryptops = {
  1094. .is_encrypted = ext4_encrypted_inode,
  1095. };
  1096. #endif
  1097. #ifdef CONFIG_QUOTA
  1098. static const char * const quotatypes[] = INITQFNAMES;
  1099. #define QTYPE2NAME(t) (quotatypes[t])
  1100. static int ext4_write_dquot(struct dquot *dquot);
  1101. static int ext4_acquire_dquot(struct dquot *dquot);
  1102. static int ext4_release_dquot(struct dquot *dquot);
  1103. static int ext4_mark_dquot_dirty(struct dquot *dquot);
  1104. static int ext4_write_info(struct super_block *sb, int type);
  1105. static int ext4_quota_on(struct super_block *sb, int type, int format_id,
  1106. const struct path *path);
  1107. static int ext4_quota_on_mount(struct super_block *sb, int type);
  1108. static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
  1109. size_t len, loff_t off);
  1110. static ssize_t ext4_quota_write(struct super_block *sb, int type,
  1111. const char *data, size_t len, loff_t off);
  1112. static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
  1113. unsigned int flags);
  1114. static int ext4_enable_quotas(struct super_block *sb);
  1115. static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
  1116. static struct dquot **ext4_get_dquots(struct inode *inode)
  1117. {
  1118. return EXT4_I(inode)->i_dquot;
  1119. }
  1120. static const struct dquot_operations ext4_quota_operations = {
  1121. .get_reserved_space = ext4_get_reserved_space,
  1122. .write_dquot = ext4_write_dquot,
  1123. .acquire_dquot = ext4_acquire_dquot,
  1124. .release_dquot = ext4_release_dquot,
  1125. .mark_dirty = ext4_mark_dquot_dirty,
  1126. .write_info = ext4_write_info,
  1127. .alloc_dquot = dquot_alloc,
  1128. .destroy_dquot = dquot_destroy,
  1129. .get_projid = ext4_get_projid,
  1130. .get_inode_usage = ext4_get_inode_usage,
  1131. .get_next_id = ext4_get_next_id,
  1132. };
  1133. static const struct quotactl_ops ext4_qctl_operations = {
  1134. .quota_on = ext4_quota_on,
  1135. .quota_off = ext4_quota_off,
  1136. .quota_sync = dquot_quota_sync,
  1137. .get_state = dquot_get_state,
  1138. .set_info = dquot_set_dqinfo,
  1139. .get_dqblk = dquot_get_dqblk,
  1140. .set_dqblk = dquot_set_dqblk,
  1141. .get_nextdqblk = dquot_get_next_dqblk,
  1142. };
  1143. #endif
  1144. static const struct super_operations ext4_sops = {
  1145. .alloc_inode = ext4_alloc_inode,
  1146. .destroy_inode = ext4_destroy_inode,
  1147. .write_inode = ext4_write_inode,
  1148. .dirty_inode = ext4_dirty_inode,
  1149. .drop_inode = ext4_drop_inode,
  1150. .evict_inode = ext4_evict_inode,
  1151. .put_super = ext4_put_super,
  1152. .sync_fs = ext4_sync_fs,
  1153. .freeze_fs = ext4_freeze,
  1154. .unfreeze_fs = ext4_unfreeze,
  1155. .statfs = ext4_statfs,
  1156. .remount_fs = ext4_remount,
  1157. .show_options = ext4_show_options,
  1158. #ifdef CONFIG_QUOTA
  1159. .quota_read = ext4_quota_read,
  1160. .quota_write = ext4_quota_write,
  1161. .get_dquots = ext4_get_dquots,
  1162. #endif
  1163. .bdev_try_to_free_page = bdev_try_to_free_page,
  1164. };
  1165. static const struct export_operations ext4_export_ops = {
  1166. .fh_to_dentry = ext4_fh_to_dentry,
  1167. .fh_to_parent = ext4_fh_to_parent,
  1168. .get_parent = ext4_get_parent,
  1169. };
  1170. enum {
  1171. Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
  1172. Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
  1173. Opt_nouid32, Opt_debug, Opt_removed,
  1174. Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
  1175. Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
  1176. Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
  1177. Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
  1178. Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
  1179. Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
  1180. Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
  1181. Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
  1182. Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
  1183. Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
  1184. Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
  1185. Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
  1186. Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
  1187. Opt_inode_readahead_blks, Opt_journal_ioprio,
  1188. Opt_dioread_nolock, Opt_dioread_lock,
  1189. Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
  1190. Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
  1191. };
  1192. static const match_table_t tokens = {
  1193. {Opt_bsd_df, "bsddf"},
  1194. {Opt_minix_df, "minixdf"},
  1195. {Opt_grpid, "grpid"},
  1196. {Opt_grpid, "bsdgroups"},
  1197. {Opt_nogrpid, "nogrpid"},
  1198. {Opt_nogrpid, "sysvgroups"},
  1199. {Opt_resgid, "resgid=%u"},
  1200. {Opt_resuid, "resuid=%u"},
  1201. {Opt_sb, "sb=%u"},
  1202. {Opt_err_cont, "errors=continue"},
  1203. {Opt_err_panic, "errors=panic"},
  1204. {Opt_err_ro, "errors=remount-ro"},
  1205. {Opt_nouid32, "nouid32"},
  1206. {Opt_debug, "debug"},
  1207. {Opt_removed, "oldalloc"},
  1208. {Opt_removed, "orlov"},
  1209. {Opt_user_xattr, "user_xattr"},
  1210. {Opt_nouser_xattr, "nouser_xattr"},
  1211. {Opt_acl, "acl"},
  1212. {Opt_noacl, "noacl"},
  1213. {Opt_noload, "norecovery"},
  1214. {Opt_noload, "noload"},
  1215. {Opt_removed, "nobh"},
  1216. {Opt_removed, "bh"},
  1217. {Opt_commit, "commit=%u"},
  1218. {Opt_min_batch_time, "min_batch_time=%u"},
  1219. {Opt_max_batch_time, "max_batch_time=%u"},
  1220. {Opt_journal_dev, "journal_dev=%u"},
  1221. {Opt_journal_path, "journal_path=%s"},
  1222. {Opt_journal_checksum, "journal_checksum"},
  1223. {Opt_nojournal_checksum, "nojournal_checksum"},
  1224. {Opt_journal_async_commit, "journal_async_commit"},
  1225. {Opt_abort, "abort"},
  1226. {Opt_data_journal, "data=journal"},
  1227. {Opt_data_ordered, "data=ordered"},
  1228. {Opt_data_writeback, "data=writeback"},
  1229. {Opt_data_err_abort, "data_err=abort"},
  1230. {Opt_data_err_ignore, "data_err=ignore"},
  1231. {Opt_offusrjquota, "usrjquota="},
  1232. {Opt_usrjquota, "usrjquota=%s"},
  1233. {Opt_offgrpjquota, "grpjquota="},
  1234. {Opt_grpjquota, "grpjquota=%s"},
  1235. {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
  1236. {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
  1237. {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
  1238. {Opt_grpquota, "grpquota"},
  1239. {Opt_noquota, "noquota"},
  1240. {Opt_quota, "quota"},
  1241. {Opt_usrquota, "usrquota"},
  1242. {Opt_prjquota, "prjquota"},
  1243. {Opt_barrier, "barrier=%u"},
  1244. {Opt_barrier, "barrier"},
  1245. {Opt_nobarrier, "nobarrier"},
  1246. {Opt_i_version, "i_version"},
  1247. {Opt_dax, "dax"},
  1248. {Opt_stripe, "stripe=%u"},
  1249. {Opt_delalloc, "delalloc"},
  1250. {Opt_lazytime, "lazytime"},
  1251. {Opt_nolazytime, "nolazytime"},
  1252. {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
  1253. {Opt_nodelalloc, "nodelalloc"},
  1254. {Opt_removed, "mblk_io_submit"},
  1255. {Opt_removed, "nomblk_io_submit"},
  1256. {Opt_block_validity, "block_validity"},
  1257. {Opt_noblock_validity, "noblock_validity"},
  1258. {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
  1259. {Opt_journal_ioprio, "journal_ioprio=%u"},
  1260. {Opt_auto_da_alloc, "auto_da_alloc=%u"},
  1261. {Opt_auto_da_alloc, "auto_da_alloc"},
  1262. {Opt_noauto_da_alloc, "noauto_da_alloc"},
  1263. {Opt_dioread_nolock, "dioread_nolock"},
  1264. {Opt_dioread_lock, "dioread_lock"},
  1265. {Opt_discard, "discard"},
  1266. {Opt_nodiscard, "nodiscard"},
  1267. {Opt_init_itable, "init_itable=%u"},
  1268. {Opt_init_itable, "init_itable"},
  1269. {Opt_noinit_itable, "noinit_itable"},
  1270. {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
  1271. {Opt_test_dummy_encryption, "test_dummy_encryption"},
  1272. {Opt_nombcache, "nombcache"},
  1273. {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */
  1274. {Opt_removed, "check=none"}, /* mount option from ext2/3 */
  1275. {Opt_removed, "nocheck"}, /* mount option from ext2/3 */
  1276. {Opt_removed, "reservation"}, /* mount option from ext2/3 */
  1277. {Opt_removed, "noreservation"}, /* mount option from ext2/3 */
  1278. {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */
  1279. {Opt_err, NULL},
  1280. };
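/*
 * A rough illustration of how the table above is used (see parse_options()
 * further down): a mount string such as "errors=remount-ro,data=ordered,commit=30"
 * is split on commas, and each piece is matched against these patterns,
 * yielding Opt_err_ro, Opt_data_ordered, and Opt_commit with a numeric
 * argument of 30.
 */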
  1281. static ext4_fsblk_t get_sb_block(void **data)
  1282. {
  1283. ext4_fsblk_t sb_block;
  1284. char *options = (char *) *data;
  1285. if (!options || strncmp(options, "sb=", 3) != 0)
  1286. return 1; /* Default location */
  1287. options += 3;
  1288. /* TODO: use simple_strtoll with >32bit ext4 */
  1289. sb_block = simple_strtoul(options, &options, 0);
  1290. if (*options && *options != ',') {
  1291. printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
  1292. (char *) *data);
  1293. return 1;
  1294. }
  1295. if (*options == ',')
  1296. options++;
  1297. *data = (void *) options;
  1298. return sb_block;
  1299. }
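/*
 * For example, assuming a filesystem with 1 KiB blocks (where the first
 * backup superblock sits at block 8193), mounting with "sb=8193,ro" makes
 * get_sb_block() return 8193 and leaves *data pointing at "ro" for the
 * normal option parser.
 */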
  1300. #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
  1301. static const char deprecated_msg[] =
  1302. "Mount option \"%s\" will be removed by %s\n"
  1303. "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
  1304. #ifdef CONFIG_QUOTA
  1305. static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
  1306. {
  1307. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1308. char *qname;
  1309. int ret = -1;
  1310. if (sb_any_quota_loaded(sb) &&
  1311. !sbi->s_qf_names[qtype]) {
  1312. ext4_msg(sb, KERN_ERR,
  1313. "Cannot change journaled "
  1314. "quota options when quota turned on");
  1315. return -1;
  1316. }
  1317. if (ext4_has_feature_quota(sb)) {
  1318. ext4_msg(sb, KERN_INFO, "Journaled quota options "
  1319. "ignored when QUOTA feature is enabled");
  1320. return 1;
  1321. }
  1322. qname = match_strdup(args);
  1323. if (!qname) {
  1324. ext4_msg(sb, KERN_ERR,
  1325. "Not enough memory for storing quotafile name");
  1326. return -1;
  1327. }
  1328. if (sbi->s_qf_names[qtype]) {
  1329. if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
  1330. ret = 1;
  1331. else
  1332. ext4_msg(sb, KERN_ERR,
  1333. "%s quota file already specified",
  1334. QTYPE2NAME(qtype));
  1335. goto errout;
  1336. }
  1337. if (strchr(qname, '/')) {
  1338. ext4_msg(sb, KERN_ERR,
  1339. "quotafile must be on filesystem root");
  1340. goto errout;
  1341. }
  1342. sbi->s_qf_names[qtype] = qname;
  1343. set_opt(sb, QUOTA);
  1344. return 1;
  1345. errout:
  1346. kfree(qname);
  1347. return ret;
  1348. }
  1349. static int clear_qf_name(struct super_block *sb, int qtype)
  1350. {
  1351. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1352. if (sb_any_quota_loaded(sb) &&
  1353. sbi->s_qf_names[qtype]) {
  1354. ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
  1355. " when quota turned on");
  1356. return -1;
  1357. }
  1358. kfree(sbi->s_qf_names[qtype]);
  1359. sbi->s_qf_names[qtype] = NULL;
  1360. return 1;
  1361. }
  1362. #endif
  1363. #define MOPT_SET 0x0001
  1364. #define MOPT_CLEAR 0x0002
  1365. #define MOPT_NOSUPPORT 0x0004
  1366. #define MOPT_EXPLICIT 0x0008
  1367. #define MOPT_CLEAR_ERR 0x0010
  1368. #define MOPT_GTE0 0x0020
  1369. #ifdef CONFIG_QUOTA
  1370. #define MOPT_Q 0
  1371. #define MOPT_QFMT 0x0040
  1372. #else
  1373. #define MOPT_Q MOPT_NOSUPPORT
  1374. #define MOPT_QFMT MOPT_NOSUPPORT
  1375. #endif
  1376. #define MOPT_DATAJ 0x0080
  1377. #define MOPT_NO_EXT2 0x0100
  1378. #define MOPT_NO_EXT3 0x0200
  1379. #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
  1380. #define MOPT_STRING 0x0400
  1381. static const struct mount_opts {
  1382. int token;
  1383. int mount_opt;
  1384. int flags;
  1385. } ext4_mount_opts[] = {
  1386. {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
  1387. {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
  1388. {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
  1389. {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
  1390. {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
  1391. {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
  1392. {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
  1393. MOPT_EXT4_ONLY | MOPT_SET},
  1394. {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
  1395. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1396. {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
  1397. {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
  1398. {Opt_delalloc, EXT4_MOUNT_DELALLOC,
  1399. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1400. {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
  1401. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1402. {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
  1403. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1404. {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
  1405. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1406. {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
  1407. EXT4_MOUNT_JOURNAL_CHECKSUM),
  1408. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1409. {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
  1410. {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
  1411. {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
  1412. {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
  1413. {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
  1414. MOPT_NO_EXT2},
  1415. {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
  1416. MOPT_NO_EXT2},
  1417. {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
  1418. {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
  1419. {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
  1420. {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
  1421. {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
  1422. {Opt_commit, 0, MOPT_GTE0},
  1423. {Opt_max_batch_time, 0, MOPT_GTE0},
  1424. {Opt_min_batch_time, 0, MOPT_GTE0},
  1425. {Opt_inode_readahead_blks, 0, MOPT_GTE0},
  1426. {Opt_init_itable, 0, MOPT_GTE0},
  1427. {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
  1428. {Opt_stripe, 0, MOPT_GTE0},
  1429. {Opt_resuid, 0, MOPT_GTE0},
  1430. {Opt_resgid, 0, MOPT_GTE0},
  1431. {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
  1432. {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
  1433. {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
  1434. {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
  1435. {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
  1436. {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
  1437. MOPT_NO_EXT2 | MOPT_DATAJ},
  1438. {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
  1439. {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
  1440. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  1441. {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
  1442. {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
  1443. #else
  1444. {Opt_acl, 0, MOPT_NOSUPPORT},
  1445. {Opt_noacl, 0, MOPT_NOSUPPORT},
  1446. #endif
  1447. {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
  1448. {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
  1449. {Opt_debug_want_extra_isize, 0, MOPT_GTE0},
  1450. {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
  1451. {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
  1452. MOPT_SET | MOPT_Q},
  1453. {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
  1454. MOPT_SET | MOPT_Q},
  1455. {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
  1456. MOPT_SET | MOPT_Q},
  1457. {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
  1458. EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
  1459. MOPT_CLEAR | MOPT_Q},
  1460. {Opt_usrjquota, 0, MOPT_Q},
  1461. {Opt_grpjquota, 0, MOPT_Q},
  1462. {Opt_offusrjquota, 0, MOPT_Q},
  1463. {Opt_offgrpjquota, 0, MOPT_Q},
  1464. {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
  1465. {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
  1466. {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
  1467. {Opt_max_dir_size_kb, 0, MOPT_GTE0},
  1468. {Opt_test_dummy_encryption, 0, MOPT_GTE0},
  1469. {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
  1470. {Opt_err, 0, 0}
  1471. };
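/*
 * A sketch of how these flags are interpreted by handle_mount_opt() below:
 * "barrier" matches {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET} and sets
 * that bit in sbi->s_mount_opt, while "nobarrier" matches the MOPT_CLEAR
 * entry and clears it; "barrier=0" also clears the bit, because an explicit
 * zero argument to a MOPT_SET entry is treated as "turn the option off".
 */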
  1472. static int handle_mount_opt(struct super_block *sb, char *opt, int token,
  1473. substring_t *args, unsigned long *journal_devnum,
  1474. unsigned int *journal_ioprio, int is_remount)
  1475. {
  1476. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1477. const struct mount_opts *m;
  1478. kuid_t uid;
  1479. kgid_t gid;
  1480. int arg = 0;
  1481. #ifdef CONFIG_QUOTA
  1482. if (token == Opt_usrjquota)
  1483. return set_qf_name(sb, USRQUOTA, &args[0]);
  1484. else if (token == Opt_grpjquota)
  1485. return set_qf_name(sb, GRPQUOTA, &args[0]);
  1486. else if (token == Opt_offusrjquota)
  1487. return clear_qf_name(sb, USRQUOTA);
  1488. else if (token == Opt_offgrpjquota)
  1489. return clear_qf_name(sb, GRPQUOTA);
  1490. #endif
  1491. switch (token) {
  1492. case Opt_noacl:
  1493. case Opt_nouser_xattr:
  1494. ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
  1495. break;
  1496. case Opt_sb:
  1497. return 1; /* handled by get_sb_block() */
  1498. case Opt_removed:
  1499. ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
  1500. return 1;
  1501. case Opt_abort:
  1502. sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
  1503. return 1;
  1504. case Opt_i_version:
  1505. sb->s_flags |= MS_I_VERSION;
  1506. return 1;
  1507. case Opt_lazytime:
  1508. sb->s_flags |= MS_LAZYTIME;
  1509. return 1;
  1510. case Opt_nolazytime:
  1511. sb->s_flags &= ~MS_LAZYTIME;
  1512. return 1;
  1513. }
  1514. for (m = ext4_mount_opts; m->token != Opt_err; m++)
  1515. if (token == m->token)
  1516. break;
  1517. if (m->token == Opt_err) {
  1518. ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
  1519. "or missing value", opt);
  1520. return -1;
  1521. }
  1522. if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
  1523. ext4_msg(sb, KERN_ERR,
  1524. "Mount option \"%s\" incompatible with ext2", opt);
  1525. return -1;
  1526. }
  1527. if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
  1528. ext4_msg(sb, KERN_ERR,
  1529. "Mount option \"%s\" incompatible with ext3", opt);
  1530. return -1;
  1531. }
  1532. if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
  1533. return -1;
  1534. if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
  1535. return -1;
  1536. if (m->flags & MOPT_EXPLICIT) {
  1537. if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
  1538. set_opt2(sb, EXPLICIT_DELALLOC);
  1539. } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
  1540. set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
  1541. } else
  1542. return -1;
  1543. }
  1544. if (m->flags & MOPT_CLEAR_ERR)
  1545. clear_opt(sb, ERRORS_MASK);
  1546. if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
  1547. ext4_msg(sb, KERN_ERR, "Cannot change quota "
  1548. "options when quota turned on");
  1549. return -1;
  1550. }
  1551. if (m->flags & MOPT_NOSUPPORT) {
  1552. ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
  1553. } else if (token == Opt_commit) {
  1554. if (arg == 0)
  1555. arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
  1556. sbi->s_commit_interval = HZ * arg;
  1557. } else if (token == Opt_debug_want_extra_isize) {
  1558. sbi->s_want_extra_isize = arg;
  1559. } else if (token == Opt_max_batch_time) {
  1560. sbi->s_max_batch_time = arg;
  1561. } else if (token == Opt_min_batch_time) {
  1562. sbi->s_min_batch_time = arg;
  1563. } else if (token == Opt_inode_readahead_blks) {
  1564. if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
  1565. ext4_msg(sb, KERN_ERR,
  1566. "EXT4-fs: inode_readahead_blks must be "
  1567. "0 or a power of 2 smaller than 2^31");
  1568. return -1;
  1569. }
  1570. sbi->s_inode_readahead_blks = arg;
  1571. } else if (token == Opt_init_itable) {
  1572. set_opt(sb, INIT_INODE_TABLE);
  1573. if (!args->from)
  1574. arg = EXT4_DEF_LI_WAIT_MULT;
  1575. sbi->s_li_wait_mult = arg;
  1576. } else if (token == Opt_max_dir_size_kb) {
  1577. sbi->s_max_dir_size_kb = arg;
  1578. } else if (token == Opt_stripe) {
  1579. sbi->s_stripe = arg;
  1580. } else if (token == Opt_resuid) {
  1581. uid = make_kuid(current_user_ns(), arg);
  1582. if (!uid_valid(uid)) {
  1583. ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
  1584. return -1;
  1585. }
  1586. sbi->s_resuid = uid;
  1587. } else if (token == Opt_resgid) {
  1588. gid = make_kgid(current_user_ns(), arg);
  1589. if (!gid_valid(gid)) {
  1590. ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
  1591. return -1;
  1592. }
  1593. sbi->s_resgid = gid;
  1594. } else if (token == Opt_journal_dev) {
  1595. if (is_remount) {
  1596. ext4_msg(sb, KERN_ERR,
  1597. "Cannot specify journal on remount");
  1598. return -1;
  1599. }
  1600. *journal_devnum = arg;
  1601. } else if (token == Opt_journal_path) {
  1602. char *journal_path;
  1603. struct inode *journal_inode;
  1604. struct path path;
  1605. int error;
  1606. if (is_remount) {
  1607. ext4_msg(sb, KERN_ERR,
  1608. "Cannot specify journal on remount");
  1609. return -1;
  1610. }
  1611. journal_path = match_strdup(&args[0]);
  1612. if (!journal_path) {
  1613. ext4_msg(sb, KERN_ERR, "error: could not dup "
  1614. "journal device string");
  1615. return -1;
  1616. }
  1617. error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
  1618. if (error) {
  1619. ext4_msg(sb, KERN_ERR, "error: could not find "
  1620. "journal device path: error %d", error);
  1621. kfree(journal_path);
  1622. return -1;
  1623. }
  1624. journal_inode = d_inode(path.dentry);
  1625. if (!S_ISBLK(journal_inode->i_mode)) {
  1626. ext4_msg(sb, KERN_ERR, "error: journal path %s "
  1627. "is not a block device", journal_path);
  1628. path_put(&path);
  1629. kfree(journal_path);
  1630. return -1;
  1631. }
  1632. *journal_devnum = new_encode_dev(journal_inode->i_rdev);
  1633. path_put(&path);
  1634. kfree(journal_path);
  1635. } else if (token == Opt_journal_ioprio) {
  1636. if (arg > 7) {
  1637. ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
  1638. " (must be 0-7)");
  1639. return -1;
  1640. }
  1641. *journal_ioprio =
  1642. IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
  1643. } else if (token == Opt_test_dummy_encryption) {
  1644. #ifdef CONFIG_EXT4_FS_ENCRYPTION
  1645. sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
  1646. ext4_msg(sb, KERN_WARNING,
  1647. "Test dummy encryption mode enabled");
  1648. #else
  1649. ext4_msg(sb, KERN_WARNING,
  1650. "Test dummy encryption mount option ignored");
  1651. #endif
  1652. } else if (m->flags & MOPT_DATAJ) {
  1653. if (is_remount) {
  1654. if (!sbi->s_journal)
  1655. ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
  1656. else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
  1657. ext4_msg(sb, KERN_ERR,
  1658. "Cannot change data mode on remount");
  1659. return -1;
  1660. }
  1661. } else {
  1662. clear_opt(sb, DATA_FLAGS);
  1663. sbi->s_mount_opt |= m->mount_opt;
  1664. }
  1665. #ifdef CONFIG_QUOTA
  1666. } else if (m->flags & MOPT_QFMT) {
  1667. if (sb_any_quota_loaded(sb) &&
  1668. sbi->s_jquota_fmt != m->mount_opt) {
  1669. ext4_msg(sb, KERN_ERR, "Cannot change journaled "
  1670. "quota options when quota turned on");
  1671. return -1;
  1672. }
  1673. if (ext4_has_feature_quota(sb)) {
  1674. ext4_msg(sb, KERN_INFO,
  1675. "Quota format mount options ignored "
  1676. "when QUOTA feature is enabled");
  1677. return 1;
  1678. }
  1679. sbi->s_jquota_fmt = m->mount_opt;
  1680. #endif
  1681. } else if (token == Opt_dax) {
  1682. #ifdef CONFIG_FS_DAX
  1683. ext4_msg(sb, KERN_WARNING,
  1684. "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
  1685. sbi->s_mount_opt |= m->mount_opt;
  1686. #else
  1687. ext4_msg(sb, KERN_INFO, "dax option not supported");
  1688. return -1;
  1689. #endif
  1690. } else if (token == Opt_data_err_abort) {
  1691. sbi->s_mount_opt |= m->mount_opt;
  1692. } else if (token == Opt_data_err_ignore) {
  1693. sbi->s_mount_opt &= ~m->mount_opt;
  1694. } else {
  1695. if (!args->from)
  1696. arg = 1;
  1697. if (m->flags & MOPT_CLEAR)
  1698. arg = !arg;
  1699. else if (unlikely(!(m->flags & MOPT_SET))) {
  1700. ext4_msg(sb, KERN_WARNING,
  1701. "buggy handling of option %s", opt);
  1702. WARN_ON(1);
  1703. return -1;
  1704. }
  1705. if (arg != 0)
  1706. sbi->s_mount_opt |= m->mount_opt;
  1707. else
  1708. sbi->s_mount_opt &= ~m->mount_opt;
  1709. }
  1710. return 1;
  1711. }
  1712. static int parse_options(char *options, struct super_block *sb,
  1713. unsigned long *journal_devnum,
  1714. unsigned int *journal_ioprio,
  1715. int is_remount)
  1716. {
  1717. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1718. char *p;
  1719. substring_t args[MAX_OPT_ARGS];
  1720. int token;
  1721. if (!options)
  1722. return 1;
  1723. while ((p = strsep(&options, ",")) != NULL) {
  1724. if (!*p)
  1725. continue;
  1726. /*
  1727. * Initialize args struct so we know whether arg was
  1728. * found; some options take optional arguments.
  1729. */
  1730. args[0].to = args[0].from = NULL;
  1731. token = match_token(p, tokens, args);
  1732. if (handle_mount_opt(sb, p, token, args, journal_devnum,
  1733. journal_ioprio, is_remount) < 0)
  1734. return 0;
  1735. }
  1736. #ifdef CONFIG_QUOTA
  1737. /*
  1738. * We do the test below only for project quotas. 'usrquota' and
  1739. * 'grpquota' mount options are allowed even without quota feature
  1740. * to support legacy quotas in quota files.
  1741. */
  1742. if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
  1743. ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
  1744. "Cannot enable project quota enforcement.");
  1745. return 0;
  1746. }
  1747. if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
  1748. if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
  1749. clear_opt(sb, USRQUOTA);
  1750. if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
  1751. clear_opt(sb, GRPQUOTA);
  1752. if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
  1753. ext4_msg(sb, KERN_ERR, "old and new quota "
  1754. "format mixing");
  1755. return 0;
  1756. }
  1757. if (!sbi->s_jquota_fmt) {
  1758. ext4_msg(sb, KERN_ERR, "journaled quota format "
  1759. "not specified");
  1760. return 0;
  1761. }
  1762. }
  1763. #endif
  1764. if (test_opt(sb, DIOREAD_NOLOCK)) {
  1765. int blocksize =
  1766. BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
  1767. if (blocksize < PAGE_SIZE) {
  1768. ext4_msg(sb, KERN_ERR, "can't mount with "
  1769. "dioread_nolock if block size != PAGE_SIZE");
  1770. return 0;
  1771. }
  1772. }
  1773. return 1;
  1774. }
  1775. static inline void ext4_show_quota_options(struct seq_file *seq,
  1776. struct super_block *sb)
  1777. {
  1778. #if defined(CONFIG_QUOTA)
  1779. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1780. if (sbi->s_jquota_fmt) {
  1781. char *fmtname = "";
  1782. switch (sbi->s_jquota_fmt) {
  1783. case QFMT_VFS_OLD:
  1784. fmtname = "vfsold";
  1785. break;
  1786. case QFMT_VFS_V0:
  1787. fmtname = "vfsv0";
  1788. break;
  1789. case QFMT_VFS_V1:
  1790. fmtname = "vfsv1";
  1791. break;
  1792. }
  1793. seq_printf(seq, ",jqfmt=%s", fmtname);
  1794. }
  1795. if (sbi->s_qf_names[USRQUOTA])
  1796. seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
  1797. if (sbi->s_qf_names[GRPQUOTA])
  1798. seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
  1799. #endif
  1800. }
  1801. static const char *token2str(int token)
  1802. {
  1803. const struct match_token *t;
  1804. for (t = tokens; t->token != Opt_err; t++)
  1805. if (t->token == token && !strchr(t->pattern, '='))
  1806. break;
  1807. return t->pattern;
  1808. }
  1809. /*
  1810. * Show an option if
  1811. * - it's set to a non-default value OR
  1812. * - if the per-sb default is different from the global default
  1813. */
  1814. static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
  1815. int nodefs)
  1816. {
  1817. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1818. struct ext4_super_block *es = sbi->s_es;
  1819. int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
  1820. const struct mount_opts *m;
  1821. char sep = nodefs ? '\n' : ',';
  1822. #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
  1823. #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
  1824. if (sbi->s_sb_block != 1)
  1825. SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
  1826. for (m = ext4_mount_opts; m->token != Opt_err; m++) {
  1827. int want_set = m->flags & MOPT_SET;
  1828. if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
  1829. (m->flags & MOPT_CLEAR_ERR))
  1830. continue;
  1831. if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
  1832. continue; /* skip if same as the default */
  1833. if ((want_set &&
  1834. (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
  1835. (!want_set && (sbi->s_mount_opt & m->mount_opt)))
  1836. continue; /* select Opt_noFoo vs Opt_Foo */
  1837. SEQ_OPTS_PRINT("%s", token2str(m->token));
  1838. }
  1839. if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
  1840. le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
  1841. SEQ_OPTS_PRINT("resuid=%u",
  1842. from_kuid_munged(&init_user_ns, sbi->s_resuid));
  1843. if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
  1844. le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
  1845. SEQ_OPTS_PRINT("resgid=%u",
  1846. from_kgid_munged(&init_user_ns, sbi->s_resgid));
  1847. def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
  1848. if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
  1849. SEQ_OPTS_PUTS("errors=remount-ro");
  1850. if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
  1851. SEQ_OPTS_PUTS("errors=continue");
  1852. if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
  1853. SEQ_OPTS_PUTS("errors=panic");
  1854. if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
  1855. SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
  1856. if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
  1857. SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
  1858. if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
  1859. SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
  1860. if (sb->s_flags & MS_I_VERSION)
  1861. SEQ_OPTS_PUTS("i_version");
  1862. if (nodefs || sbi->s_stripe)
  1863. SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
  1864. if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
  1865. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
  1866. SEQ_OPTS_PUTS("data=journal");
  1867. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
  1868. SEQ_OPTS_PUTS("data=ordered");
  1869. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
  1870. SEQ_OPTS_PUTS("data=writeback");
  1871. }
  1872. if (nodefs ||
  1873. sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
  1874. SEQ_OPTS_PRINT("inode_readahead_blks=%u",
  1875. sbi->s_inode_readahead_blks);
  1876. if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
  1877. (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
  1878. SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
  1879. if (nodefs || sbi->s_max_dir_size_kb)
  1880. SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
  1881. if (test_opt(sb, DATA_ERR_ABORT))
  1882. SEQ_OPTS_PUTS("data_err=abort");
  1883. ext4_show_quota_options(seq, sb);
  1884. return 0;
  1885. }
  1886. static int ext4_show_options(struct seq_file *seq, struct dentry *root)
  1887. {
  1888. return _ext4_show_options(seq, root->d_sb, 0);
  1889. }
  1890. int ext4_seq_options_show(struct seq_file *seq, void *offset)
  1891. {
  1892. struct super_block *sb = seq->private;
  1893. int rc;
  1894. seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
  1895. rc = _ext4_show_options(seq, sb, 1);
  1896. seq_puts(seq, "\n");
  1897. return rc;
  1898. }
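/*
 * For instance, reading /proc/fs/ext4/<dev>/options on a typically mounted
 * filesystem produces "rw" (or "ro") followed by one option per line, e.g.
 * delalloc, barrier, user_xattr, commit=5; exactly which lines appear
 * depends on the mount options and the defaults baked into the superblock.
 */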
  1899. static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
  1900. int read_only)
  1901. {
  1902. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1903. int res = 0;
  1904. if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
  1905. ext4_msg(sb, KERN_ERR, "revision level too high, "
  1906. "forcing read-only mode");
  1907. res = MS_RDONLY;
  1908. }
  1909. if (read_only)
  1910. goto done;
  1911. if (!(sbi->s_mount_state & EXT4_VALID_FS))
  1912. ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
  1913. "running e2fsck is recommended");
  1914. else if (sbi->s_mount_state & EXT4_ERROR_FS)
  1915. ext4_msg(sb, KERN_WARNING,
  1916. "warning: mounting fs with errors, "
  1917. "running e2fsck is recommended");
  1918. else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
  1919. le16_to_cpu(es->s_mnt_count) >=
  1920. (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
  1921. ext4_msg(sb, KERN_WARNING,
  1922. "warning: maximal mount count reached, "
  1923. "running e2fsck is recommended");
  1924. else if (le32_to_cpu(es->s_checkinterval) &&
  1925. (le32_to_cpu(es->s_lastcheck) +
  1926. le32_to_cpu(es->s_checkinterval) <= get_seconds()))
  1927. ext4_msg(sb, KERN_WARNING,
  1928. "warning: checktime reached, "
  1929. "running e2fsck is recommended");
  1930. if (!sbi->s_journal)
  1931. es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
  1932. if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
  1933. es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
  1934. le16_add_cpu(&es->s_mnt_count, 1);
  1935. es->s_mtime = cpu_to_le32(get_seconds());
  1936. ext4_update_dynamic_rev(sb);
  1937. if (sbi->s_journal)
  1938. ext4_set_feature_journal_needs_recovery(sb);
  1939. ext4_commit_super(sb, 1);
  1940. done:
  1941. if (test_opt(sb, DEBUG))
  1942. printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
  1943. "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
  1944. sb->s_blocksize,
  1945. sbi->s_groups_count,
  1946. EXT4_BLOCKS_PER_GROUP(sb),
  1947. EXT4_INODES_PER_GROUP(sb),
  1948. sbi->s_mount_opt, sbi->s_mount_opt2);
  1949. cleancache_init_fs(sb);
  1950. return res;
  1951. }
  1952. int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
  1953. {
  1954. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1955. struct flex_groups *new_groups;
  1956. int size;
  1957. if (!sbi->s_log_groups_per_flex)
  1958. return 0;
  1959. size = ext4_flex_group(sbi, ngroup - 1) + 1;
  1960. if (size <= sbi->s_flex_groups_allocated)
  1961. return 0;
  1962. size = roundup_pow_of_two(size * sizeof(struct flex_groups));
  1963. new_groups = kvzalloc(size, GFP_KERNEL);
  1964. if (!new_groups) {
  1965. ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
  1966. size / (int) sizeof(struct flex_groups));
  1967. return -ENOMEM;
  1968. }
  1969. if (sbi->s_flex_groups) {
  1970. memcpy(new_groups, sbi->s_flex_groups,
  1971. (sbi->s_flex_groups_allocated *
  1972. sizeof(struct flex_groups)));
  1973. kvfree(sbi->s_flex_groups);
  1974. }
  1975. sbi->s_flex_groups = new_groups;
  1976. sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
  1977. return 0;
  1978. }
  1979. static int ext4_fill_flex_info(struct super_block *sb)
  1980. {
  1981. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1982. struct ext4_group_desc *gdp = NULL;
  1983. ext4_group_t flex_group;
  1984. int i, err;
  1985. sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
  1986. if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
  1987. sbi->s_log_groups_per_flex = 0;
  1988. return 1;
  1989. }
  1990. err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
  1991. if (err)
  1992. goto failed;
  1993. for (i = 0; i < sbi->s_groups_count; i++) {
  1994. gdp = ext4_get_group_desc(sb, i, NULL);
  1995. flex_group = ext4_flex_group(sbi, i);
  1996. atomic_add(ext4_free_inodes_count(sb, gdp),
  1997. &sbi->s_flex_groups[flex_group].free_inodes);
  1998. atomic64_add(ext4_free_group_clusters(sb, gdp),
  1999. &sbi->s_flex_groups[flex_group].free_clusters);
  2000. atomic_add(ext4_used_dirs_count(sb, gdp),
  2001. &sbi->s_flex_groups[flex_group].used_dirs);
  2002. }
  2003. return 1;
  2004. failed:
  2005. return 0;
  2006. }
  2007. static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
  2008. struct ext4_group_desc *gdp)
  2009. {
  2010. int offset = offsetof(struct ext4_group_desc, bg_checksum);
  2011. __u16 crc = 0;
  2012. __le32 le_group = cpu_to_le32(block_group);
  2013. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2014. if (ext4_has_metadata_csum(sbi->s_sb)) {
  2015. /* Use new metadata_csum algorithm */
  2016. __u32 csum32;
  2017. __u16 dummy_csum = 0;
  2018. csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
  2019. sizeof(le_group));
  2020. csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
  2021. csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
  2022. sizeof(dummy_csum));
  2023. offset += sizeof(dummy_csum);
  2024. if (offset < sbi->s_desc_size)
  2025. csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
  2026. sbi->s_desc_size - offset);
  2027. crc = csum32 & 0xFFFF;
  2028. goto out;
  2029. }
  2030. /* old crc16 code */
  2031. if (!ext4_has_feature_gdt_csum(sb))
  2032. return 0;
  2033. crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
  2034. crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
  2035. crc = crc16(crc, (__u8 *)gdp, offset);
  2036. offset += sizeof(gdp->bg_checksum); /* skip checksum */
  2037. /* for checksum of struct ext4_group_desc do the rest...*/
  2038. if (ext4_has_feature_64bit(sb) &&
  2039. offset < le16_to_cpu(sbi->s_es->s_desc_size))
  2040. crc = crc16(crc, (__u8 *)gdp + offset,
  2041. le16_to_cpu(sbi->s_es->s_desc_size) -
  2042. offset);
  2043. out:
  2044. return cpu_to_le16(crc);
  2045. }
  2046. int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
  2047. struct ext4_group_desc *gdp)
  2048. {
  2049. if (ext4_has_group_desc_csum(sb) &&
  2050. (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
  2051. return 0;
  2052. return 1;
  2053. }
  2054. void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
  2055. struct ext4_group_desc *gdp)
  2056. {
  2057. if (!ext4_has_group_desc_csum(sb))
  2058. return;
  2059. gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
  2060. }
  2061. /* Called at mount-time, super-block is locked */
  2062. static int ext4_check_descriptors(struct super_block *sb,
  2063. ext4_fsblk_t sb_block,
  2064. ext4_group_t *first_not_zeroed)
  2065. {
  2066. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2067. ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
  2068. ext4_fsblk_t last_block;
  2069. ext4_fsblk_t block_bitmap;
  2070. ext4_fsblk_t inode_bitmap;
  2071. ext4_fsblk_t inode_table;
  2072. int flexbg_flag = 0;
  2073. ext4_group_t i, grp = sbi->s_groups_count;
  2074. if (ext4_has_feature_flex_bg(sb))
  2075. flexbg_flag = 1;
  2076. ext4_debug("Checking group descriptors");
  2077. for (i = 0; i < sbi->s_groups_count; i++) {
  2078. struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
  2079. if (i == sbi->s_groups_count - 1 || flexbg_flag)
  2080. last_block = ext4_blocks_count(sbi->s_es) - 1;
  2081. else
  2082. last_block = first_block +
  2083. (EXT4_BLOCKS_PER_GROUP(sb) - 1);
  2084. if ((grp == sbi->s_groups_count) &&
  2085. !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
  2086. grp = i;
  2087. block_bitmap = ext4_block_bitmap(sb, gdp);
  2088. if (block_bitmap == sb_block) {
  2089. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2090. "Block bitmap for group %u overlaps "
  2091. "superblock", i);
  2092. }
  2093. if (block_bitmap < first_block || block_bitmap > last_block) {
  2094. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2095. "Block bitmap for group %u not in group "
  2096. "(block %llu)!", i, block_bitmap);
  2097. return 0;
  2098. }
  2099. inode_bitmap = ext4_inode_bitmap(sb, gdp);
  2100. if (inode_bitmap == sb_block) {
  2101. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2102. "Inode bitmap for group %u overlaps "
  2103. "superblock", i);
  2104. }
  2105. if (inode_bitmap < first_block || inode_bitmap > last_block) {
  2106. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2107. "Inode bitmap for group %u not in group "
  2108. "(block %llu)!", i, inode_bitmap);
  2109. return 0;
  2110. }
  2111. inode_table = ext4_inode_table(sb, gdp);
  2112. if (inode_table == sb_block) {
  2113. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2114. "Inode table for group %u overlaps "
  2115. "superblock", i);
  2116. }
  2117. if (inode_table < first_block ||
  2118. inode_table + sbi->s_itb_per_group - 1 > last_block) {
  2119. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2120. "Inode table for group %u not in group "
  2121. "(block %llu)!", i, inode_table);
  2122. return 0;
  2123. }
  2124. ext4_lock_group(sb, i);
  2125. if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
  2126. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2127. "Checksum for group %u failed (%u!=%u)",
  2128. i, le16_to_cpu(ext4_group_desc_csum(sb, i,
  2129. gdp)), le16_to_cpu(gdp->bg_checksum));
  2130. if (!sb_rdonly(sb)) {
  2131. ext4_unlock_group(sb, i);
  2132. return 0;
  2133. }
  2134. }
  2135. ext4_unlock_group(sb, i);
  2136. if (!flexbg_flag)
  2137. first_block += EXT4_BLOCKS_PER_GROUP(sb);
  2138. }
  2139. if (NULL != first_not_zeroed)
  2140. *first_not_zeroed = grp;
  2141. return 1;
  2142. }
  2143. /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
  2144. * the superblock) which were deleted from all directories, but held open by
  2145. * a process at the time of a crash. We walk the list and try to delete these
  2146. * inodes at recovery time (only with a read-write filesystem).
  2147. *
  2148. * In order to keep the orphan inode chain consistent during traversal (in
  2149. * case of crash during recovery), we link each inode into the superblock
  2150. * orphan list_head and handle it the same way as an inode deletion during
  2151. * normal operation (which journals the operations for us).
  2152. *
  2153. * We only do an iget() and an iput() on each inode, which is very safe if we
  2154. * accidentally point at an in-use or already deleted inode. The worst that
  2155. * can happen in this case is that we get a "bit already cleared" message from
  2156. * ext4_free_inode(). The only reason we would point at a wrong inode is if
  2157. * e2fsck was run on this filesystem, and it must have already done the orphan
  2158. * inode cleanup for us, so we can safely abort without any further action.
  2159. */
  2160. static void ext4_orphan_cleanup(struct super_block *sb,
  2161. struct ext4_super_block *es)
  2162. {
  2163. unsigned int s_flags = sb->s_flags;
  2164. int ret, nr_orphans = 0, nr_truncates = 0;
  2165. #ifdef CONFIG_QUOTA
  2166. int quota_update = 0;
  2167. int i;
  2168. #endif
  2169. if (!es->s_last_orphan) {
  2170. jbd_debug(4, "no orphan inodes to clean up\n");
  2171. return;
  2172. }
  2173. if (bdev_read_only(sb->s_bdev)) {
  2174. ext4_msg(sb, KERN_ERR, "write access "
  2175. "unavailable, skipping orphan cleanup");
  2176. return;
  2177. }
  2178. /* Check if feature set would not allow a r/w mount */
  2179. if (!ext4_feature_set_ok(sb, 0)) {
  2180. ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
  2181. "unknown ROCOMPAT features");
  2182. return;
  2183. }
  2184. if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
  2185. /* don't clear list on RO mount w/ errors */
  2186. if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
  2187. ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
  2188. "clearing orphan list.\n");
  2189. es->s_last_orphan = 0;
  2190. }
  2191. jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
  2192. return;
  2193. }
  2194. if (s_flags & MS_RDONLY) {
  2195. ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
  2196. sb->s_flags &= ~MS_RDONLY;
  2197. }
  2198. #ifdef CONFIG_QUOTA
  2199. /* Needed for iput() to work correctly and not trash data */
  2200. sb->s_flags |= MS_ACTIVE;
  2201. /*
  2202. * Turn on quotas which were not enabled for read-only mounts if
  2203. * filesystem has quota feature, so that they are updated correctly.
  2204. */
  2205. if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
  2206. int ret = ext4_enable_quotas(sb);
  2207. if (!ret)
  2208. quota_update = 1;
  2209. else
  2210. ext4_msg(sb, KERN_ERR,
  2211. "Cannot turn on quotas: error %d", ret);
  2212. }
2213. /* Turn on journaled quotas used for old-style quota files */
  2214. for (i = 0; i < EXT4_MAXQUOTAS; i++) {
  2215. if (EXT4_SB(sb)->s_qf_names[i]) {
  2216. int ret = ext4_quota_on_mount(sb, i);
  2217. if (!ret)
  2218. quota_update = 1;
  2219. else
  2220. ext4_msg(sb, KERN_ERR,
  2221. "Cannot turn on journaled "
  2222. "quota: type %d: error %d", i, ret);
  2223. }
  2224. }
  2225. #endif
  2226. while (es->s_last_orphan) {
  2227. struct inode *inode;
  2228. /*
  2229. * We may have encountered an error during cleanup; if
  2230. * so, skip the rest.
  2231. */
  2232. if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
  2233. jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
  2234. es->s_last_orphan = 0;
  2235. break;
  2236. }
  2237. inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
  2238. if (IS_ERR(inode)) {
  2239. es->s_last_orphan = 0;
  2240. break;
  2241. }
  2242. list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
  2243. dquot_initialize(inode);
  2244. if (inode->i_nlink) {
  2245. if (test_opt(sb, DEBUG))
  2246. ext4_msg(sb, KERN_DEBUG,
  2247. "%s: truncating inode %lu to %lld bytes",
  2248. __func__, inode->i_ino, inode->i_size);
  2249. jbd_debug(2, "truncating inode %lu to %lld bytes\n",
  2250. inode->i_ino, inode->i_size);
  2251. inode_lock(inode);
  2252. truncate_inode_pages(inode->i_mapping, inode->i_size);
  2253. ret = ext4_truncate(inode);
  2254. if (ret)
  2255. ext4_std_error(inode->i_sb, ret);
  2256. inode_unlock(inode);
  2257. nr_truncates++;
  2258. } else {
  2259. if (test_opt(sb, DEBUG))
  2260. ext4_msg(sb, KERN_DEBUG,
  2261. "%s: deleting unreferenced inode %lu",
  2262. __func__, inode->i_ino);
  2263. jbd_debug(2, "deleting unreferenced inode %lu\n",
  2264. inode->i_ino);
  2265. nr_orphans++;
  2266. }
  2267. iput(inode); /* The delete magic happens here! */
  2268. }
  2269. #define PLURAL(x) (x), ((x) == 1) ? "" : "s"
  2270. if (nr_orphans)
  2271. ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
  2272. PLURAL(nr_orphans));
  2273. if (nr_truncates)
  2274. ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
  2275. PLURAL(nr_truncates));
  2276. #ifdef CONFIG_QUOTA
  2277. /* Turn off quotas if they were enabled for orphan cleanup */
  2278. if (quota_update) {
  2279. for (i = 0; i < EXT4_MAXQUOTAS; i++) {
  2280. if (sb_dqopt(sb)->files[i])
  2281. dquot_quota_off(sb, i);
  2282. }
  2283. }
  2284. #endif
  2285. sb->s_flags = s_flags; /* Restore MS_RDONLY status */
  2286. }
  2287. /*
  2288. * Maximal extent format file size.
  2289. * Resulting logical blkno at s_maxbytes must fit in our on-disk
  2290. * extent format containers, within a sector_t, and within i_blocks
  2291. * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
  2292. * so that won't be a limiting factor.
  2293. *
2294. * However, there is another limiting factor. We store extents as a
2295. * starting block plus a length, so the length of the extent covering
2296. * the maximum file size must fit into the on-disk format containers as
2297. * well. Since a length covering blocks 0 through N is N + 1 (one more
2298. * than the highest logical block number), we have to lower s_maxbytes by one fs block.
  2299. *
  2300. * Note, this does *not* consider any metadata overhead for vfs i_blocks.
  2301. */
  2302. static loff_t ext4_max_size(int blkbits, int has_huge_files)
  2303. {
  2304. loff_t res;
  2305. loff_t upper_limit = MAX_LFS_FILESIZE;
  2306. /* small i_blocks in vfs inode? */
  2307. if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
  2308. /*
2309. * CONFIG_LBDAF not being enabled implies that the inode's
2310. * i_blocks field counts the file's 512-byte blocks and is 32 bits wide
2311. * (32 == size of vfs inode i_blocks * 8)
  2312. */
  2313. upper_limit = (1LL << 32) - 1;
  2314. /* total blocks in file system block size */
  2315. upper_limit >>= (blkbits - 9);
  2316. upper_limit <<= blkbits;
  2317. }
  2318. /*
  2319. * 32-bit extent-start container, ee_block. We lower the maxbytes
  2320. * by one fs block, so ee_len can cover the extent of maximum file
  2321. * size
  2322. */
  2323. res = (1LL << 32) - 1;
  2324. res <<= blkbits;
  2325. /* Sanity check against vm- & vfs- imposed limits */
  2326. if (res > upper_limit)
  2327. res = upper_limit;
  2328. return res;
  2329. }
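/*
 * Worked example: with 4 KiB blocks (blkbits == 12), has_huge_files set and
 * a 64-bit blkcnt_t, res = ((1LL << 32) - 1) << 12, i.e. 16 TiB minus one
 * 4 KiB block, which is the usual per-file limit for extent-mapped files
 * on a 4 KiB-block filesystem.
 */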
  2330. /*
  2331. * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
  2332. * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
  2333. * We need to be 1 filesystem block less than the 2^48 sector limit.
  2334. */
  2335. static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
  2336. {
  2337. loff_t res = EXT4_NDIR_BLOCKS;
  2338. int meta_blocks;
  2339. loff_t upper_limit;
  2340. /* This is calculated to be the largest file size for a dense, block
  2341. * mapped file such that the file's total number of 512-byte sectors,
  2342. * including data and all indirect blocks, does not exceed (2^48 - 1).
  2343. *
2344. * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
  2345. * number of 512-byte sectors of the file.
  2346. */
  2347. if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
  2348. /*
2349. * !has_huge_files or CONFIG_LBDAF not enabled implies that the
2350. * inode i_blocks field represents the total file size in 512-byte
2351. * sectors, capped at 2^32 of them (32 == size of vfs inode i_blocks * 8)
  2352. */
  2353. upper_limit = (1LL << 32) - 1;
  2354. /* total blocks in file system block size */
  2355. upper_limit >>= (bits - 9);
  2356. } else {
  2357. /*
  2358. * We use 48 bit ext4_inode i_blocks
  2359. * With EXT4_HUGE_FILE_FL set the i_blocks
  2360. * represent total number of blocks in
  2361. * file system block size
  2362. */
  2363. upper_limit = (1LL << 48) - 1;
  2364. }
  2365. /* indirect blocks */
  2366. meta_blocks = 1;
  2367. /* double indirect blocks */
  2368. meta_blocks += 1 + (1LL << (bits-2));
2369. /* triple indirect blocks */
  2370. meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
  2371. upper_limit -= meta_blocks;
  2372. upper_limit <<= bits;
  2373. res += 1LL << (bits-2);
  2374. res += 1LL << (2*(bits-2));
  2375. res += 1LL << (3*(bits-2));
  2376. res <<= bits;
  2377. if (res > upper_limit)
  2378. res = upper_limit;
  2379. if (res > MAX_LFS_FILESIZE)
  2380. res = MAX_LFS_FILESIZE;
  2381. return res;
  2382. }
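/*
 * Worked example of the block-mapped limit above (illustrative, for
 * 4 KiB blocks): each indirect block holds 2^(12-2) == 1024 pointers,
 * so the addressable data blocks are 12 (direct) + 1024 + 1024^2 +
 * 1024^3 == 1074791436, about 4 TiB of data; the 2^48 512-byte-sector
 * limit in i_blocks is far larger and does not bind in this case.
 */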
  2383. static ext4_fsblk_t descriptor_loc(struct super_block *sb,
  2384. ext4_fsblk_t logical_sb_block, int nr)
  2385. {
  2386. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2387. ext4_group_t bg, first_meta_bg;
  2388. int has_super = 0;
  2389. first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
  2390. if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
  2391. return logical_sb_block + nr + 1;
  2392. bg = sbi->s_desc_per_block * nr;
  2393. if (ext4_bg_has_super(sb, bg))
  2394. has_super = 1;
  2395. /*
  2396. * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
  2397. * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled
  2398. * on modern mke2fs or blksize > 1k on older mke2fs) then we must
  2399. * compensate.
  2400. */
  2401. if (sb->s_blocksize == 1024 && nr == 0 &&
  2402. le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
  2403. has_super++;
  2404. return (has_super + ext4_group_first_block_no(sb, bg));
  2405. }
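/*
 * Worked example for the lookup above (illustrative, assuming 4 KiB
 * blocks and 32-byte descriptors, i.e. s_desc_per_block == 128): on a
 * non-meta_bg filesystem, descriptor block nr simply follows the
 * superblock at logical_sb_block + nr + 1. With meta_bg, descriptor
 * block nr == 130 maps to bg == 130 * 128 == 16640 and is read from the
 * first block of that group, shifted by one if the group carries a
 * superblock backup.
 */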
  2406. /**
  2407. * ext4_get_stripe_size: Get the stripe size.
  2408. * @sbi: In memory super block info
  2409. *
  2410. * If we have specified it via mount option, then
  2411. * use the mount option value. If the value specified at mount time is
  2412. * greater than the blocks per group use the super block value.
  2413. * If the super block value is greater than blocks per group return 0.
2415. * The allocator needs it to be less than the blocks per group.
  2415. *
  2416. */
  2417. static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
  2418. {
  2419. unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
  2420. unsigned long stripe_width =
  2421. le32_to_cpu(sbi->s_es->s_raid_stripe_width);
  2422. int ret;
  2423. if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
  2424. ret = sbi->s_stripe;
  2425. else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
  2426. ret = stripe_width;
  2427. else if (stride && stride <= sbi->s_blocks_per_group)
  2428. ret = stride;
  2429. else
  2430. ret = 0;
  2431. /*
2432. * If the resulting stripe size is 1 block, striping makes no sense
2433. * and we set it to 0 to turn off the stripe handling code.
  2434. */
  2435. if (ret <= 1)
  2436. ret = 0;
  2437. return ret;
  2438. }
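/*
 * Example of the precedence above (illustrative numbers): with mount
 * option stripe=256, s_raid_stripe_width == 512 and s_raid_stride == 128
 * on a filesystem with 32768 blocks per group, the mount option wins and
 * 256 is used; without the mount option, 512 (the stripe width) would be
 * chosen. A value of 0 or 1, or anything larger than the blocks per
 * group, disables stripe handling.
 */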
  2439. /*
  2440. * Check whether this filesystem can be mounted based on
  2441. * the features present and the RDONLY/RDWR mount requested.
  2442. * Returns 1 if this filesystem can be mounted as requested,
  2443. * 0 if it cannot be.
  2444. */
  2445. static int ext4_feature_set_ok(struct super_block *sb, int readonly)
  2446. {
  2447. if (ext4_has_unknown_ext4_incompat_features(sb)) {
  2448. ext4_msg(sb, KERN_ERR,
  2449. "Couldn't mount because of "
  2450. "unsupported optional features (%x)",
  2451. (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
  2452. ~EXT4_FEATURE_INCOMPAT_SUPP));
  2453. return 0;
  2454. }
  2455. if (readonly)
  2456. return 1;
  2457. if (ext4_has_feature_readonly(sb)) {
  2458. ext4_msg(sb, KERN_INFO, "filesystem is read-only");
  2459. sb->s_flags |= MS_RDONLY;
  2460. return 1;
  2461. }
  2462. /* Check that feature set is OK for a read-write mount */
  2463. if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
  2464. ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
  2465. "unsupported optional features (%x)",
  2466. (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
  2467. ~EXT4_FEATURE_RO_COMPAT_SUPP));
  2468. return 0;
  2469. }
  2470. /*
2471. * A filesystem with huge files enabled can only be mounted
2472. * read-write on 32-bit systems if the kernel is built with CONFIG_LBDAF
  2473. */
  2474. if (ext4_has_feature_huge_file(sb)) {
  2475. if (sizeof(blkcnt_t) < sizeof(u64)) {
  2476. ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
  2477. "cannot be mounted RDWR without "
  2478. "CONFIG_LBDAF");
  2479. return 0;
  2480. }
  2481. }
  2482. if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
  2483. ext4_msg(sb, KERN_ERR,
  2484. "Can't support bigalloc feature without "
  2485. "extents feature\n");
  2486. return 0;
  2487. }
  2488. #ifndef CONFIG_QUOTA
  2489. if (ext4_has_feature_quota(sb) && !readonly) {
  2490. ext4_msg(sb, KERN_ERR,
  2491. "Filesystem with quota feature cannot be mounted RDWR "
  2492. "without CONFIG_QUOTA");
  2493. return 0;
  2494. }
  2495. if (ext4_has_feature_project(sb) && !readonly) {
  2496. ext4_msg(sb, KERN_ERR,
  2497. "Filesystem with project quota feature cannot be mounted RDWR "
  2498. "without CONFIG_QUOTA");
  2499. return 0;
  2500. }
  2501. #endif /* CONFIG_QUOTA */
  2502. return 1;
  2503. }
  2504. /*
  2505. * This function is called once a day if we have errors logged
  2506. * on the file system
  2507. */
  2508. static void print_daily_error_info(unsigned long arg)
  2509. {
  2510. struct super_block *sb = (struct super_block *) arg;
  2511. struct ext4_sb_info *sbi;
  2512. struct ext4_super_block *es;
  2513. sbi = EXT4_SB(sb);
  2514. es = sbi->s_es;
  2515. if (es->s_error_count)
  2516. /* fsck newer than v1.41.13 is needed to clean this condition. */
  2517. ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
  2518. le32_to_cpu(es->s_error_count));
  2519. if (es->s_first_error_time) {
  2520. printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
  2521. sb->s_id, le32_to_cpu(es->s_first_error_time),
  2522. (int) sizeof(es->s_first_error_func),
  2523. es->s_first_error_func,
  2524. le32_to_cpu(es->s_first_error_line));
  2525. if (es->s_first_error_ino)
  2526. printk(KERN_CONT ": inode %u",
  2527. le32_to_cpu(es->s_first_error_ino));
  2528. if (es->s_first_error_block)
  2529. printk(KERN_CONT ": block %llu", (unsigned long long)
  2530. le64_to_cpu(es->s_first_error_block));
  2531. printk(KERN_CONT "\n");
  2532. }
  2533. if (es->s_last_error_time) {
  2534. printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
  2535. sb->s_id, le32_to_cpu(es->s_last_error_time),
  2536. (int) sizeof(es->s_last_error_func),
  2537. es->s_last_error_func,
  2538. le32_to_cpu(es->s_last_error_line));
  2539. if (es->s_last_error_ino)
  2540. printk(KERN_CONT ": inode %u",
  2541. le32_to_cpu(es->s_last_error_ino));
  2542. if (es->s_last_error_block)
  2543. printk(KERN_CONT ": block %llu", (unsigned long long)
  2544. le64_to_cpu(es->s_last_error_block));
  2545. printk(KERN_CONT "\n");
  2546. }
  2547. mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
  2548. }
  2549. /* Find next suitable group and run ext4_init_inode_table */
  2550. static int ext4_run_li_request(struct ext4_li_request *elr)
  2551. {
  2552. struct ext4_group_desc *gdp = NULL;
  2553. ext4_group_t group, ngroups;
  2554. struct super_block *sb;
  2555. unsigned long timeout = 0;
  2556. int ret = 0;
  2557. sb = elr->lr_super;
  2558. ngroups = EXT4_SB(sb)->s_groups_count;
  2559. for (group = elr->lr_next_group; group < ngroups; group++) {
  2560. gdp = ext4_get_group_desc(sb, group, NULL);
  2561. if (!gdp) {
  2562. ret = 1;
  2563. break;
  2564. }
  2565. if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
  2566. break;
  2567. }
  2568. if (group >= ngroups)
  2569. ret = 1;
  2570. if (!ret) {
  2571. timeout = jiffies;
  2572. ret = ext4_init_inode_table(sb, group,
  2573. elr->lr_timeout ? 0 : 1);
  2574. if (elr->lr_timeout == 0) {
  2575. timeout = (jiffies - timeout) *
  2576. elr->lr_sbi->s_li_wait_mult;
  2577. elr->lr_timeout = timeout;
  2578. }
  2579. elr->lr_next_sched = jiffies + elr->lr_timeout;
  2580. elr->lr_next_group = group + 1;
  2581. }
  2582. return ret;
  2583. }
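/*
 * Scheduling note (illustrative numbers): the first pass for a
 * filesystem runs with lr_timeout == 0, so the time one
 * ext4_init_inode_table() call took is multiplied by s_li_wait_mult and
 * becomes the gap before the next group is zeroed; e.g. a call that took
 * 50 jiffies with a multiplier of 10 schedules the next run 500 jiffies
 * later.
 */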
  2584. /*
2585. * Remove lr_request from the request list and free the
2586. * request structure. Should be called with li_list_mtx held.
  2587. */
  2588. static void ext4_remove_li_request(struct ext4_li_request *elr)
  2589. {
  2590. struct ext4_sb_info *sbi;
  2591. if (!elr)
  2592. return;
  2593. sbi = elr->lr_sbi;
  2594. list_del(&elr->lr_request);
  2595. sbi->s_li_request = NULL;
  2596. kfree(elr);
  2597. }
  2598. static void ext4_unregister_li_request(struct super_block *sb)
  2599. {
  2600. mutex_lock(&ext4_li_mtx);
  2601. if (!ext4_li_info) {
  2602. mutex_unlock(&ext4_li_mtx);
  2603. return;
  2604. }
  2605. mutex_lock(&ext4_li_info->li_list_mtx);
  2606. ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
  2607. mutex_unlock(&ext4_li_info->li_list_mtx);
  2608. mutex_unlock(&ext4_li_mtx);
  2609. }
  2610. static struct task_struct *ext4_lazyinit_task;
  2611. /*
2612. * This is the function where the ext4lazyinit thread lives. It walks
2613. * through the request list searching for the next scheduled filesystem.
2614. * When such a fs is found, run the lazy initialization request
2615. * (ext4_run_li_request) and keep track of the time spent in this
2616. * function. Based on that time we compute the next schedule time of
2617. * the request. When the walk through the list is complete, compute the
2618. * next wakeup time and put the thread to sleep.
  2619. */
  2620. static int ext4_lazyinit_thread(void *arg)
  2621. {
  2622. struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
  2623. struct list_head *pos, *n;
  2624. struct ext4_li_request *elr;
  2625. unsigned long next_wakeup, cur;
  2626. BUG_ON(NULL == eli);
  2627. cont_thread:
  2628. while (true) {
  2629. next_wakeup = MAX_JIFFY_OFFSET;
  2630. mutex_lock(&eli->li_list_mtx);
  2631. if (list_empty(&eli->li_request_list)) {
  2632. mutex_unlock(&eli->li_list_mtx);
  2633. goto exit_thread;
  2634. }
  2635. list_for_each_safe(pos, n, &eli->li_request_list) {
  2636. int err = 0;
  2637. int progress = 0;
  2638. elr = list_entry(pos, struct ext4_li_request,
  2639. lr_request);
  2640. if (time_before(jiffies, elr->lr_next_sched)) {
  2641. if (time_before(elr->lr_next_sched, next_wakeup))
  2642. next_wakeup = elr->lr_next_sched;
  2643. continue;
  2644. }
  2645. if (down_read_trylock(&elr->lr_super->s_umount)) {
  2646. if (sb_start_write_trylock(elr->lr_super)) {
  2647. progress = 1;
  2648. /*
  2649. * We hold sb->s_umount, sb can not
  2650. * be removed from the list, it is
  2651. * now safe to drop li_list_mtx
  2652. */
  2653. mutex_unlock(&eli->li_list_mtx);
  2654. err = ext4_run_li_request(elr);
  2655. sb_end_write(elr->lr_super);
  2656. mutex_lock(&eli->li_list_mtx);
  2657. n = pos->next;
  2658. }
  2659. up_read((&elr->lr_super->s_umount));
  2660. }
  2661. /* error, remove the lazy_init job */
  2662. if (err) {
  2663. ext4_remove_li_request(elr);
  2664. continue;
  2665. }
  2666. if (!progress) {
  2667. elr->lr_next_sched = jiffies +
  2668. (prandom_u32()
  2669. % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
  2670. }
  2671. if (time_before(elr->lr_next_sched, next_wakeup))
  2672. next_wakeup = elr->lr_next_sched;
  2673. }
  2674. mutex_unlock(&eli->li_list_mtx);
  2675. try_to_freeze();
  2676. cur = jiffies;
  2677. if ((time_after_eq(cur, next_wakeup)) ||
  2678. (MAX_JIFFY_OFFSET == next_wakeup)) {
  2679. cond_resched();
  2680. continue;
  2681. }
  2682. schedule_timeout_interruptible(next_wakeup - cur);
  2683. if (kthread_should_stop()) {
  2684. ext4_clear_request_list();
  2685. goto exit_thread;
  2686. }
  2687. }
  2688. exit_thread:
  2689. /*
  2690. * It looks like the request list is empty, but we need
  2691. * to check it under the li_list_mtx lock, to prevent any
  2692. * additions into it, and of course we should lock ext4_li_mtx
  2693. * to atomically free the list and ext4_li_info, because at
  2694. * this point another ext4 filesystem could be registering
2695. * a new one.
  2696. */
  2697. mutex_lock(&ext4_li_mtx);
  2698. mutex_lock(&eli->li_list_mtx);
  2699. if (!list_empty(&eli->li_request_list)) {
  2700. mutex_unlock(&eli->li_list_mtx);
  2701. mutex_unlock(&ext4_li_mtx);
  2702. goto cont_thread;
  2703. }
  2704. mutex_unlock(&eli->li_list_mtx);
  2705. kfree(ext4_li_info);
  2706. ext4_li_info = NULL;
  2707. mutex_unlock(&ext4_li_mtx);
  2708. return 0;
  2709. }
  2710. static void ext4_clear_request_list(void)
  2711. {
  2712. struct list_head *pos, *n;
  2713. struct ext4_li_request *elr;
  2714. mutex_lock(&ext4_li_info->li_list_mtx);
  2715. list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
  2716. elr = list_entry(pos, struct ext4_li_request,
  2717. lr_request);
  2718. ext4_remove_li_request(elr);
  2719. }
  2720. mutex_unlock(&ext4_li_info->li_list_mtx);
  2721. }
  2722. static int ext4_run_lazyinit_thread(void)
  2723. {
  2724. ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
  2725. ext4_li_info, "ext4lazyinit");
  2726. if (IS_ERR(ext4_lazyinit_task)) {
  2727. int err = PTR_ERR(ext4_lazyinit_task);
  2728. ext4_clear_request_list();
  2729. kfree(ext4_li_info);
  2730. ext4_li_info = NULL;
  2731. printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
  2732. "initialization thread\n",
  2733. err);
  2734. return err;
  2735. }
  2736. ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
  2737. return 0;
  2738. }
  2739. /*
2740. * Check whether it makes sense to run the itable init thread or not.
2741. * If there is at least one uninitialized inode table, return the
2742. * corresponding group number; otherwise the loop goes through all
2743. * groups and returns the total number of groups.
  2744. */
  2745. static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
  2746. {
  2747. ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
  2748. struct ext4_group_desc *gdp = NULL;
  2749. for (group = 0; group < ngroups; group++) {
  2750. gdp = ext4_get_group_desc(sb, group, NULL);
  2751. if (!gdp)
  2752. continue;
  2753. if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
  2754. break;
  2755. }
  2756. return group;
  2757. }
  2758. static int ext4_li_info_new(void)
  2759. {
  2760. struct ext4_lazy_init *eli = NULL;
  2761. eli = kzalloc(sizeof(*eli), GFP_KERNEL);
  2762. if (!eli)
  2763. return -ENOMEM;
  2764. INIT_LIST_HEAD(&eli->li_request_list);
  2765. mutex_init(&eli->li_list_mtx);
  2766. eli->li_state |= EXT4_LAZYINIT_QUIT;
  2767. ext4_li_info = eli;
  2768. return 0;
  2769. }
  2770. static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
  2771. ext4_group_t start)
  2772. {
  2773. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2774. struct ext4_li_request *elr;
  2775. elr = kzalloc(sizeof(*elr), GFP_KERNEL);
  2776. if (!elr)
  2777. return NULL;
  2778. elr->lr_super = sb;
  2779. elr->lr_sbi = sbi;
  2780. elr->lr_next_group = start;
  2781. /*
  2782. * Randomize first schedule time of the request to
  2783. * spread the inode table initialization requests
  2784. * better.
  2785. */
  2786. elr->lr_next_sched = jiffies + (prandom_u32() %
  2787. (EXT4_DEF_LI_MAX_START_DELAY * HZ));
  2788. return elr;
  2789. }
  2790. int ext4_register_li_request(struct super_block *sb,
  2791. ext4_group_t first_not_zeroed)
  2792. {
  2793. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2794. struct ext4_li_request *elr = NULL;
  2795. ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
  2796. int ret = 0;
  2797. mutex_lock(&ext4_li_mtx);
  2798. if (sbi->s_li_request != NULL) {
  2799. /*
  2800. * Reset timeout so it can be computed again, because
  2801. * s_li_wait_mult might have changed.
  2802. */
  2803. sbi->s_li_request->lr_timeout = 0;
  2804. goto out;
  2805. }
  2806. if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
  2807. !test_opt(sb, INIT_INODE_TABLE))
  2808. goto out;
  2809. elr = ext4_li_request_new(sb, first_not_zeroed);
  2810. if (!elr) {
  2811. ret = -ENOMEM;
  2812. goto out;
  2813. }
  2814. if (NULL == ext4_li_info) {
  2815. ret = ext4_li_info_new();
  2816. if (ret)
  2817. goto out;
  2818. }
  2819. mutex_lock(&ext4_li_info->li_list_mtx);
  2820. list_add(&elr->lr_request, &ext4_li_info->li_request_list);
  2821. mutex_unlock(&ext4_li_info->li_list_mtx);
  2822. sbi->s_li_request = elr;
  2823. /*
2824. * set elr to NULL here since it has been inserted into
2825. * the request_list and its removal and freeing are
2826. * handled by ext4_clear_request_list from now on.
  2827. */
  2828. elr = NULL;
  2829. if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
  2830. ret = ext4_run_lazyinit_thread();
  2831. if (ret)
  2832. goto out;
  2833. }
  2834. out:
  2835. mutex_unlock(&ext4_li_mtx);
  2836. if (ret)
  2837. kfree(elr);
  2838. return ret;
  2839. }
  2840. /*
  2841. * We do not need to lock anything since this is called on
  2842. * module unload.
  2843. */
  2844. static void ext4_destroy_lazyinit_thread(void)
  2845. {
  2846. /*
  2847. * If thread exited earlier
  2848. * there's nothing to be done.
  2849. */
  2850. if (!ext4_li_info || !ext4_lazyinit_task)
  2851. return;
  2852. kthread_stop(ext4_lazyinit_task);
  2853. }
  2854. static int set_journal_csum_feature_set(struct super_block *sb)
  2855. {
  2856. int ret = 1;
  2857. int compat, incompat;
  2858. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2859. if (ext4_has_metadata_csum(sb)) {
  2860. /* journal checksum v3 */
  2861. compat = 0;
  2862. incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
  2863. } else {
  2864. /* journal checksum v1 */
  2865. compat = JBD2_FEATURE_COMPAT_CHECKSUM;
  2866. incompat = 0;
  2867. }
  2868. jbd2_journal_clear_features(sbi->s_journal,
  2869. JBD2_FEATURE_COMPAT_CHECKSUM, 0,
  2870. JBD2_FEATURE_INCOMPAT_CSUM_V3 |
  2871. JBD2_FEATURE_INCOMPAT_CSUM_V2);
  2872. if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
  2873. ret = jbd2_journal_set_features(sbi->s_journal,
  2874. compat, 0,
  2875. JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
  2876. incompat);
  2877. } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
  2878. ret = jbd2_journal_set_features(sbi->s_journal,
  2879. compat, 0,
  2880. incompat);
  2881. jbd2_journal_clear_features(sbi->s_journal, 0, 0,
  2882. JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
  2883. } else {
  2884. jbd2_journal_clear_features(sbi->s_journal, 0, 0,
  2885. JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
  2886. }
  2887. return ret;
  2888. }
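/*
 * Summary of the cases above: any previously negotiated checksum
 * features are cleared first. With journal_async_commit or
 * journal_checksum, the journal is then given INCOMPAT_CSUM_V3 (on
 * metadata_csum filesystems) or the v1 COMPAT_CHECKSUM feature;
 * journal_async_commit additionally sets INCOMPAT_ASYNC_COMMIT, while
 * the other cases clear it. With neither mount option the journal ends
 * up with no checksum features at all.
 */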
  2889. /*
  2890. * Note: calculating the overhead so we can be compatible with
  2891. * historical BSD practice is quite difficult in the face of
  2892. * clusters/bigalloc. This is because multiple metadata blocks from
2893. * different block groups can end up in the same allocation cluster.
2894. * Calculating the exact overhead in the face of clustered allocation
2895. * requires either O(all block bitmaps) in memory or O(number of block
2896. * groups**2) in time. We will still calculate it at mount time for
2897. * older file systems --- and if we come across a bigalloc file
  2898. * system with zero in s_overhead_clusters the estimate will be close to
  2899. * correct especially for very large cluster sizes --- but for newer
  2900. * file systems, it's better to calculate this figure once at mkfs
  2901. * time, and store it in the superblock. If the superblock value is
  2902. * present (even for non-bigalloc file systems), we will use it.
  2903. */
  2904. static int count_overhead(struct super_block *sb, ext4_group_t grp,
  2905. char *buf)
  2906. {
  2907. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2908. struct ext4_group_desc *gdp;
  2909. ext4_fsblk_t first_block, last_block, b;
  2910. ext4_group_t i, ngroups = ext4_get_groups_count(sb);
  2911. int s, j, count = 0;
  2912. if (!ext4_has_feature_bigalloc(sb))
  2913. return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
  2914. sbi->s_itb_per_group + 2);
  2915. first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
  2916. (grp * EXT4_BLOCKS_PER_GROUP(sb));
  2917. last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
  2918. for (i = 0; i < ngroups; i++) {
  2919. gdp = ext4_get_group_desc(sb, i, NULL);
  2920. b = ext4_block_bitmap(sb, gdp);
  2921. if (b >= first_block && b <= last_block) {
  2922. ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
  2923. count++;
  2924. }
  2925. b = ext4_inode_bitmap(sb, gdp);
  2926. if (b >= first_block && b <= last_block) {
  2927. ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
  2928. count++;
  2929. }
  2930. b = ext4_inode_table(sb, gdp);
  2931. if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
  2932. for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
  2933. int c = EXT4_B2C(sbi, b - first_block);
  2934. ext4_set_bit(c, buf);
  2935. count++;
  2936. }
  2937. if (i != grp)
  2938. continue;
  2939. s = 0;
  2940. if (ext4_bg_has_super(sb, grp)) {
  2941. ext4_set_bit(s++, buf);
  2942. count++;
  2943. }
  2944. j = ext4_bg_num_gdb(sb, grp);
  2945. if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
  2946. ext4_error(sb, "Invalid number of block group "
  2947. "descriptor blocks: %d", j);
  2948. j = EXT4_BLOCKS_PER_GROUP(sb) - s;
  2949. }
  2950. count += j;
  2951. for (; j > 0; j--)
  2952. ext4_set_bit(EXT4_B2C(sbi, s++), buf);
  2953. }
  2954. if (!count)
  2955. return 0;
  2956. return EXT4_CLUSTERS_PER_GROUP(sb) -
  2957. ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
  2958. }
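/*
 * Worked example for the non-bigalloc fast path above (illustrative
 * numbers): a group that carries a superblock backup on a 4 KiB-block
 * filesystem with 64 GDT blocks and 512 inode-table blocks contributes
 * 1 + 64 + 512 + 2 == 579 overhead blocks, the trailing "+ 2" being the
 * block and inode bitmaps.
 */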
  2959. /*
  2960. * Compute the overhead and stash it in sbi->s_overhead
  2961. */
  2962. int ext4_calculate_overhead(struct super_block *sb)
  2963. {
  2964. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2965. struct ext4_super_block *es = sbi->s_es;
  2966. struct inode *j_inode;
  2967. unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
  2968. ext4_group_t i, ngroups = ext4_get_groups_count(sb);
  2969. ext4_fsblk_t overhead = 0;
  2970. char *buf = (char *) get_zeroed_page(GFP_NOFS);
  2971. if (!buf)
  2972. return -ENOMEM;
  2973. /*
  2974. * Compute the overhead (FS structures). This is constant
  2975. * for a given filesystem unless the number of block groups
  2976. * changes so we cache the previous value until it does.
  2977. */
  2978. /*
  2979. * All of the blocks before first_data_block are overhead
  2980. */
  2981. overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
  2982. /*
  2983. * Add the overhead found in each block group
  2984. */
  2985. for (i = 0; i < ngroups; i++) {
  2986. int blks;
  2987. blks = count_overhead(sb, i, buf);
  2988. overhead += blks;
  2989. if (blks)
  2990. memset(buf, 0, PAGE_SIZE);
  2991. cond_resched();
  2992. }
  2993. /*
  2994. * Add the internal journal blocks whether the journal has been
  2995. * loaded or not
  2996. */
  2997. if (sbi->s_journal && !sbi->journal_bdev)
  2998. overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
  2999. else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
  3000. j_inode = ext4_get_journal_inode(sb, j_inum);
  3001. if (j_inode) {
  3002. j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
  3003. overhead += EXT4_NUM_B2C(sbi, j_blocks);
  3004. iput(j_inode);
  3005. } else {
  3006. ext4_msg(sb, KERN_ERR, "can't get journal size");
  3007. }
  3008. }
  3009. sbi->s_overhead = overhead;
  3010. smp_wmb();
  3011. free_page((unsigned long) buf);
  3012. return 0;
  3013. }
  3014. static void ext4_set_resv_clusters(struct super_block *sb)
  3015. {
  3016. ext4_fsblk_t resv_clusters;
  3017. struct ext4_sb_info *sbi = EXT4_SB(sb);
  3018. /*
  3019. * There's no need to reserve anything when we aren't using extents.
  3020. * The space estimates are exact, there are no unwritten extents,
  3021. * hole punching doesn't need new metadata... This is needed especially
  3022. * to keep ext2/3 backward compatibility.
  3023. */
  3024. if (!ext4_has_feature_extents(sb))
  3025. return;
  3026. /*
  3027. * By default we reserve 2% or 4096 clusters, whichever is smaller.
3028. * This should cover the situations where we can not afford to run
3029. * out of space, for example when punching a hole or converting
3030. * unwritten extents in the delalloc path. In most cases such an
3031. * allocation would require only 1 or 2 blocks; higher numbers are
3032. * very rare.
  3033. */
  3034. resv_clusters = (ext4_blocks_count(sbi->s_es) >>
  3035. sbi->s_cluster_bits);
  3036. do_div(resv_clusters, 50);
  3037. resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
  3038. atomic64_set(&sbi->s_resv_clusters, resv_clusters);
  3039. }
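/*
 * Worked example of the reservation above: a 1 TiB filesystem with
 * 4 KiB blocks and no bigalloc has 2^28 == 268435456 clusters; 2% of
 * that is 5368709, which is clamped to 4096 clusters (16 MiB). A
 * standalone sketch of the same arithmetic (illustrative only, not
 * kernel code):
 */
#include <stdio.h>

int main(void)
{
	unsigned long long clusters = 1ULL << 28;	/* 1 TiB / 4 KiB */
	unsigned long long resv = clusters / 50;	/* 2% */

	if (resv > 4096)				/* cap at 4096 clusters */
		resv = 4096;
	printf("reserved clusters: %llu\n", resv);
	return 0;
}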
  3040. static int ext4_fill_super(struct super_block *sb, void *data, int silent)
  3041. {
  3042. struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
  3043. char *orig_data = kstrdup(data, GFP_KERNEL);
  3044. struct buffer_head *bh;
  3045. struct ext4_super_block *es = NULL;
  3046. struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
  3047. ext4_fsblk_t block;
  3048. ext4_fsblk_t sb_block = get_sb_block(&data);
  3049. ext4_fsblk_t logical_sb_block;
  3050. unsigned long offset = 0;
  3051. unsigned long journal_devnum = 0;
  3052. unsigned long def_mount_opts;
  3053. struct inode *root;
  3054. const char *descr;
  3055. int ret = -ENOMEM;
  3056. int blocksize, clustersize;
  3057. unsigned int db_count;
  3058. unsigned int i;
  3059. int needs_recovery, has_huge_files, has_bigalloc;
  3060. __u64 blocks_count;
  3061. int err = 0;
  3062. unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
  3063. ext4_group_t first_not_zeroed;
  3064. if ((data && !orig_data) || !sbi)
  3065. goto out_free_base;
  3066. sbi->s_daxdev = dax_dev;
  3067. sbi->s_blockgroup_lock =
  3068. kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
  3069. if (!sbi->s_blockgroup_lock)
  3070. goto out_free_base;
  3071. sb->s_fs_info = sbi;
  3072. sbi->s_sb = sb;
  3073. sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
  3074. sbi->s_sb_block = sb_block;
  3075. if (sb->s_bdev->bd_part)
  3076. sbi->s_sectors_written_start =
  3077. part_stat_read(sb->s_bdev->bd_part, sectors[1]);
  3078. /* Cleanup superblock name */
  3079. strreplace(sb->s_id, '/', '!');
  3080. /* -EINVAL is default */
  3081. ret = -EINVAL;
  3082. blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
  3083. if (!blocksize) {
  3084. ext4_msg(sb, KERN_ERR, "unable to set blocksize");
  3085. goto out_fail;
  3086. }
  3087. /*
3088. * The ext4 superblock will not be buffer aligned for block sizes other
3089. * than 1kB. We need to calculate the offset from the buffer start.
  3090. */
  3091. if (blocksize != EXT4_MIN_BLOCK_SIZE) {
  3092. logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
  3093. offset = do_div(logical_sb_block, blocksize);
  3094. } else {
  3095. logical_sb_block = sb_block;
  3096. }
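/*
 * Worked example: with the default sb_block of 1 and a 4 KiB device
 * block size, logical_sb_block == 1 * 1024 == 1024 and do_div() leaves
 * logical_sb_block == 0 with offset == 1024, i.e. the superblock is
 * read from byte offset 1024 inside device block 0.
 */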
  3097. if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
  3098. ext4_msg(sb, KERN_ERR, "unable to read superblock");
  3099. goto out_fail;
  3100. }
  3101. /*
  3102. * Note: s_es must be initialized as soon as possible because
3103. * some ext4 macros depend on its value
  3104. */
  3105. es = (struct ext4_super_block *) (bh->b_data + offset);
  3106. sbi->s_es = es;
  3107. sb->s_magic = le16_to_cpu(es->s_magic);
  3108. if (sb->s_magic != EXT4_SUPER_MAGIC)
  3109. goto cantfind_ext4;
  3110. sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
  3111. /* Warn if metadata_csum and gdt_csum are both set. */
  3112. if (ext4_has_feature_metadata_csum(sb) &&
  3113. ext4_has_feature_gdt_csum(sb))
  3114. ext4_warning(sb, "metadata_csum and uninit_bg are "
  3115. "redundant flags; please run fsck.");
  3116. /* Check for a known checksum algorithm */
  3117. if (!ext4_verify_csum_type(sb, es)) {
  3118. ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
  3119. "unknown checksum algorithm.");
  3120. silent = 1;
  3121. goto cantfind_ext4;
  3122. }
  3123. /* Load the checksum driver */
  3124. if (ext4_has_feature_metadata_csum(sb) ||
  3125. ext4_has_feature_ea_inode(sb)) {
  3126. sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
  3127. if (IS_ERR(sbi->s_chksum_driver)) {
  3128. ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
  3129. ret = PTR_ERR(sbi->s_chksum_driver);
  3130. sbi->s_chksum_driver = NULL;
  3131. goto failed_mount;
  3132. }
  3133. }
  3134. /* Check superblock checksum */
  3135. if (!ext4_superblock_csum_verify(sb, es)) {
  3136. ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
  3137. "invalid superblock checksum. Run e2fsck?");
  3138. silent = 1;
  3139. ret = -EFSBADCRC;
  3140. goto cantfind_ext4;
  3141. }
  3142. /* Precompute checksum seed for all metadata */
  3143. if (ext4_has_feature_csum_seed(sb))
  3144. sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
  3145. else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
  3146. sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
  3147. sizeof(es->s_uuid));
  3148. /* Set defaults before we parse the mount options */
  3149. def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
  3150. set_opt(sb, INIT_INODE_TABLE);
  3151. if (def_mount_opts & EXT4_DEFM_DEBUG)
  3152. set_opt(sb, DEBUG);
  3153. if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
  3154. set_opt(sb, GRPID);
  3155. if (def_mount_opts & EXT4_DEFM_UID16)
  3156. set_opt(sb, NO_UID32);
  3157. /* xattr user namespace & acls are now defaulted on */
  3158. set_opt(sb, XATTR_USER);
  3159. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  3160. set_opt(sb, POSIX_ACL);
  3161. #endif
  3162. /* don't forget to enable journal_csum when metadata_csum is enabled. */
  3163. if (ext4_has_metadata_csum(sb))
  3164. set_opt(sb, JOURNAL_CHECKSUM);
  3165. if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
  3166. set_opt(sb, JOURNAL_DATA);
  3167. else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
  3168. set_opt(sb, ORDERED_DATA);
  3169. else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
  3170. set_opt(sb, WRITEBACK_DATA);
  3171. if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
  3172. set_opt(sb, ERRORS_PANIC);
  3173. else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
  3174. set_opt(sb, ERRORS_CONT);
  3175. else
  3176. set_opt(sb, ERRORS_RO);
  3177. /* block_validity enabled by default; disable with noblock_validity */
  3178. set_opt(sb, BLOCK_VALIDITY);
  3179. if (def_mount_opts & EXT4_DEFM_DISCARD)
  3180. set_opt(sb, DISCARD);
  3181. sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
  3182. sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
  3183. sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
  3184. sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
  3185. sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
  3186. if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
  3187. set_opt(sb, BARRIER);
  3188. /*
  3189. * enable delayed allocation by default
  3190. * Use -o nodelalloc to turn it off
  3191. */
  3192. if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
  3193. ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
  3194. set_opt(sb, DELALLOC);
  3195. /*
3196. * set the default s_li_wait_mult for lazyinit, in case no mount
3197. * option is specified.
  3198. */
  3199. sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
  3200. if (sbi->s_es->s_mount_opts[0]) {
  3201. char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
  3202. sizeof(sbi->s_es->s_mount_opts),
  3203. GFP_KERNEL);
  3204. if (!s_mount_opts)
  3205. goto failed_mount;
  3206. if (!parse_options(s_mount_opts, sb, &journal_devnum,
  3207. &journal_ioprio, 0)) {
  3208. ext4_msg(sb, KERN_WARNING,
  3209. "failed to parse options in superblock: %s",
  3210. s_mount_opts);
  3211. }
  3212. kfree(s_mount_opts);
  3213. }
  3214. sbi->s_def_mount_opt = sbi->s_mount_opt;
  3215. if (!parse_options((char *) data, sb, &journal_devnum,
  3216. &journal_ioprio, 0))
  3217. goto failed_mount;
  3218. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
  3219. printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
  3220. "with data=journal disables delayed "
  3221. "allocation and O_DIRECT support!\n");
  3222. if (test_opt2(sb, EXPLICIT_DELALLOC)) {
  3223. ext4_msg(sb, KERN_ERR, "can't mount with "
  3224. "both data=journal and delalloc");
  3225. goto failed_mount;
  3226. }
  3227. if (test_opt(sb, DIOREAD_NOLOCK)) {
  3228. ext4_msg(sb, KERN_ERR, "can't mount with "
  3229. "both data=journal and dioread_nolock");
  3230. goto failed_mount;
  3231. }
  3232. if (test_opt(sb, DAX)) {
  3233. ext4_msg(sb, KERN_ERR, "can't mount with "
  3234. "both data=journal and dax");
  3235. goto failed_mount;
  3236. }
  3237. if (ext4_has_feature_encrypt(sb)) {
  3238. ext4_msg(sb, KERN_WARNING,
  3239. "encrypted files will use data=ordered "
  3240. "instead of data journaling mode");
  3241. }
  3242. if (test_opt(sb, DELALLOC))
  3243. clear_opt(sb, DELALLOC);
  3244. } else {
  3245. sb->s_iflags |= SB_I_CGROUPWB;
  3246. }
  3247. sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
  3248. (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
  3249. if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
  3250. (ext4_has_compat_features(sb) ||
  3251. ext4_has_ro_compat_features(sb) ||
  3252. ext4_has_incompat_features(sb)))
  3253. ext4_msg(sb, KERN_WARNING,
  3254. "feature flags set on rev 0 fs, "
  3255. "running e2fsck is recommended");
  3256. if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
  3257. set_opt2(sb, HURD_COMPAT);
  3258. if (ext4_has_feature_64bit(sb)) {
  3259. ext4_msg(sb, KERN_ERR,
  3260. "The Hurd can't support 64-bit file systems");
  3261. goto failed_mount;
  3262. }
  3263. /*
  3264. * ea_inode feature uses l_i_version field which is not
  3265. * available in HURD_COMPAT mode.
  3266. */
  3267. if (ext4_has_feature_ea_inode(sb)) {
  3268. ext4_msg(sb, KERN_ERR,
  3269. "ea_inode feature is not supported for Hurd");
  3270. goto failed_mount;
  3271. }
  3272. }
  3273. if (IS_EXT2_SB(sb)) {
  3274. if (ext2_feature_set_ok(sb))
  3275. ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
  3276. "using the ext4 subsystem");
  3277. else {
  3278. ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
  3279. "to feature incompatibilities");
  3280. goto failed_mount;
  3281. }
  3282. }
  3283. if (IS_EXT3_SB(sb)) {
  3284. if (ext3_feature_set_ok(sb))
  3285. ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
  3286. "using the ext4 subsystem");
  3287. else {
  3288. ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
  3289. "to feature incompatibilities");
  3290. goto failed_mount;
  3291. }
  3292. }
  3293. /*
  3294. * Check feature flags regardless of the revision level, since we
  3295. * previously didn't change the revision level when setting the flags,
  3296. * so there is a chance incompat flags are set on a rev 0 filesystem.
  3297. */
  3298. if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
  3299. goto failed_mount;
  3300. blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
  3301. if (blocksize < EXT4_MIN_BLOCK_SIZE ||
  3302. blocksize > EXT4_MAX_BLOCK_SIZE) {
  3303. ext4_msg(sb, KERN_ERR,
  3304. "Unsupported filesystem blocksize %d (%d log_block_size)",
  3305. blocksize, le32_to_cpu(es->s_log_block_size));
  3306. goto failed_mount;
  3307. }
  3308. if (le32_to_cpu(es->s_log_block_size) >
  3309. (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
  3310. ext4_msg(sb, KERN_ERR,
  3311. "Invalid log block size: %u",
  3312. le32_to_cpu(es->s_log_block_size));
  3313. goto failed_mount;
  3314. }
  3315. if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
  3316. ext4_msg(sb, KERN_ERR,
  3317. "Number of reserved GDT blocks insanely large: %d",
  3318. le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
  3319. goto failed_mount;
  3320. }
  3321. if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
  3322. err = bdev_dax_supported(sb, blocksize);
  3323. if (err)
  3324. goto failed_mount;
  3325. }
  3326. if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
  3327. ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
  3328. es->s_encryption_level);
  3329. goto failed_mount;
  3330. }
  3331. if (sb->s_blocksize != blocksize) {
  3332. /* Validate the filesystem blocksize */
  3333. if (!sb_set_blocksize(sb, blocksize)) {
  3334. ext4_msg(sb, KERN_ERR, "bad block size %d",
  3335. blocksize);
  3336. goto failed_mount;
  3337. }
  3338. brelse(bh);
  3339. logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
  3340. offset = do_div(logical_sb_block, blocksize);
  3341. bh = sb_bread_unmovable(sb, logical_sb_block);
  3342. if (!bh) {
  3343. ext4_msg(sb, KERN_ERR,
  3344. "Can't read superblock on 2nd try");
  3345. goto failed_mount;
  3346. }
  3347. es = (struct ext4_super_block *)(bh->b_data + offset);
  3348. sbi->s_es = es;
  3349. if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
  3350. ext4_msg(sb, KERN_ERR,
  3351. "Magic mismatch, very weird!");
  3352. goto failed_mount;
  3353. }
  3354. }
  3355. has_huge_files = ext4_has_feature_huge_file(sb);
  3356. sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
  3357. has_huge_files);
  3358. sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
  3359. if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
  3360. sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
  3361. sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
  3362. } else {
  3363. sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
  3364. sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
  3365. if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
  3366. (!is_power_of_2(sbi->s_inode_size)) ||
  3367. (sbi->s_inode_size > blocksize)) {
  3368. ext4_msg(sb, KERN_ERR,
  3369. "unsupported inode size: %d",
  3370. sbi->s_inode_size);
  3371. goto failed_mount;
  3372. }
  3373. if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
  3374. sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
  3375. }
  3376. sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
  3377. if (ext4_has_feature_64bit(sb)) {
  3378. if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
  3379. sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
  3380. !is_power_of_2(sbi->s_desc_size)) {
  3381. ext4_msg(sb, KERN_ERR,
  3382. "unsupported descriptor size %lu",
  3383. sbi->s_desc_size);
  3384. goto failed_mount;
  3385. }
  3386. } else
  3387. sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
  3388. sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
  3389. sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
  3390. sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
  3391. if (sbi->s_inodes_per_block == 0)
  3392. goto cantfind_ext4;
  3393. if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
  3394. sbi->s_inodes_per_group > blocksize * 8) {
3395. ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
3396. sbi->s_inodes_per_group);
  3397. goto failed_mount;
  3398. }
  3399. sbi->s_itb_per_group = sbi->s_inodes_per_group /
  3400. sbi->s_inodes_per_block;
  3401. sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
  3402. sbi->s_sbh = bh;
  3403. sbi->s_mount_state = le16_to_cpu(es->s_state);
  3404. sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
  3405. sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
  3406. for (i = 0; i < 4; i++)
  3407. sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
  3408. sbi->s_def_hash_version = es->s_def_hash_version;
  3409. if (ext4_has_feature_dir_index(sb)) {
  3410. i = le32_to_cpu(es->s_flags);
  3411. if (i & EXT2_FLAGS_UNSIGNED_HASH)
  3412. sbi->s_hash_unsigned = 3;
  3413. else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
  3414. #ifdef __CHAR_UNSIGNED__
  3415. if (!sb_rdonly(sb))
  3416. es->s_flags |=
  3417. cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
  3418. sbi->s_hash_unsigned = 3;
  3419. #else
  3420. if (!sb_rdonly(sb))
  3421. es->s_flags |=
  3422. cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
  3423. #endif
  3424. }
  3425. }
  3426. /* Handle clustersize */
  3427. clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
  3428. has_bigalloc = ext4_has_feature_bigalloc(sb);
  3429. if (has_bigalloc) {
  3430. if (clustersize < blocksize) {
  3431. ext4_msg(sb, KERN_ERR,
  3432. "cluster size (%d) smaller than "
  3433. "block size (%d)", clustersize, blocksize);
  3434. goto failed_mount;
  3435. }
  3436. if (le32_to_cpu(es->s_log_cluster_size) >
  3437. (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
  3438. ext4_msg(sb, KERN_ERR,
  3439. "Invalid log cluster size: %u",
  3440. le32_to_cpu(es->s_log_cluster_size));
  3441. goto failed_mount;
  3442. }
  3443. sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
  3444. le32_to_cpu(es->s_log_block_size);
  3445. sbi->s_clusters_per_group =
  3446. le32_to_cpu(es->s_clusters_per_group);
  3447. if (sbi->s_clusters_per_group > blocksize * 8) {
  3448. ext4_msg(sb, KERN_ERR,
  3449. "#clusters per group too big: %lu",
  3450. sbi->s_clusters_per_group);
  3451. goto failed_mount;
  3452. }
  3453. if (sbi->s_blocks_per_group !=
  3454. (sbi->s_clusters_per_group * (clustersize / blocksize))) {
  3455. ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
  3456. "clusters per group (%lu) inconsistent",
  3457. sbi->s_blocks_per_group,
  3458. sbi->s_clusters_per_group);
  3459. goto failed_mount;
  3460. }
  3461. } else {
  3462. if (clustersize != blocksize) {
  3463. ext4_warning(sb, "fragment/cluster size (%d) != "
  3464. "block size (%d)", clustersize,
  3465. blocksize);
  3466. clustersize = blocksize;
  3467. }
  3468. if (sbi->s_blocks_per_group > blocksize * 8) {
  3469. ext4_msg(sb, KERN_ERR,
  3470. "#blocks per group too big: %lu",
  3471. sbi->s_blocks_per_group);
  3472. goto failed_mount;
  3473. }
  3474. sbi->s_clusters_per_group = sbi->s_blocks_per_group;
  3475. sbi->s_cluster_bits = 0;
  3476. }
  3477. sbi->s_cluster_ratio = clustersize / blocksize;
  3478. /* Do we have standard group size of clustersize * 8 blocks ? */
  3479. if (sbi->s_blocks_per_group == clustersize << 3)
  3480. set_opt2(sb, STD_GROUP_SIZE);
  3481. /*
  3482. * Test whether we have more sectors than will fit in sector_t,
  3483. * and whether the max offset is addressable by the page cache.
  3484. */
  3485. err = generic_check_addressable(sb->s_blocksize_bits,
  3486. ext4_blocks_count(es));
  3487. if (err) {
  3488. ext4_msg(sb, KERN_ERR, "filesystem"
  3489. " too large to mount safely on this system");
  3490. if (sizeof(sector_t) < 8)
  3491. ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
  3492. goto failed_mount;
  3493. }
  3494. if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
  3495. goto cantfind_ext4;
  3496. /* check blocks count against device size */
  3497. blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
  3498. if (blocks_count && ext4_blocks_count(es) > blocks_count) {
  3499. ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
  3500. "exceeds size of device (%llu blocks)",
  3501. ext4_blocks_count(es), blocks_count);
  3502. goto failed_mount;
  3503. }
  3504. /*
  3505. * It makes no sense for the first data block to be beyond the end
  3506. * of the filesystem.
  3507. */
  3508. if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
  3509. ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
  3510. "block %u is beyond end of filesystem (%llu)",
  3511. le32_to_cpu(es->s_first_data_block),
  3512. ext4_blocks_count(es));
  3513. goto failed_mount;
  3514. }
  3515. blocks_count = (ext4_blocks_count(es) -
  3516. le32_to_cpu(es->s_first_data_block) +
  3517. EXT4_BLOCKS_PER_GROUP(sb) - 1);
  3518. do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
  3519. if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
  3520. ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
  3521. "(block count %llu, first data block %u, "
  3522. "blocks per group %lu)", sbi->s_groups_count,
  3523. ext4_blocks_count(es),
  3524. le32_to_cpu(es->s_first_data_block),
  3525. EXT4_BLOCKS_PER_GROUP(sb));
  3526. goto failed_mount;
  3527. }
  3528. sbi->s_groups_count = blocks_count;
  3529. sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
  3530. (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
  3531. db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
  3532. EXT4_DESC_PER_BLOCK(sb);
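/*
 * Worked example (illustrative geometry): a 1 TiB filesystem with 4 KiB
 * blocks has 268435456 blocks and 32768 blocks per group, i.e. 8192
 * block groups; with 32-byte descriptors (no 64bit feature) each
 * descriptor block holds 128 entries, so db_count == 64.
 */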
  3533. if (ext4_has_feature_meta_bg(sb)) {
  3534. if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
  3535. ext4_msg(sb, KERN_WARNING,
  3536. "first meta block group too large: %u "
  3537. "(group descriptor block count %u)",
  3538. le32_to_cpu(es->s_first_meta_bg), db_count);
  3539. goto failed_mount;
  3540. }
  3541. }
  3542. sbi->s_group_desc = kvmalloc(db_count *
  3543. sizeof(struct buffer_head *),
  3544. GFP_KERNEL);
  3545. if (sbi->s_group_desc == NULL) {
  3546. ext4_msg(sb, KERN_ERR, "not enough memory");
  3547. ret = -ENOMEM;
  3548. goto failed_mount;
  3549. }
  3550. bgl_lock_init(sbi->s_blockgroup_lock);
  3551. /* Pre-read the descriptors into the buffer cache */
  3552. for (i = 0; i < db_count; i++) {
  3553. block = descriptor_loc(sb, logical_sb_block, i);
  3554. sb_breadahead(sb, block);
  3555. }
  3556. for (i = 0; i < db_count; i++) {
  3557. block = descriptor_loc(sb, logical_sb_block, i);
  3558. sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
  3559. if (!sbi->s_group_desc[i]) {
  3560. ext4_msg(sb, KERN_ERR,
  3561. "can't read group descriptor %d", i);
  3562. db_count = i;
  3563. goto failed_mount2;
  3564. }
  3565. }
  3566. if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
  3567. ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
  3568. ret = -EFSCORRUPTED;
  3569. goto failed_mount2;
  3570. }
  3571. sbi->s_gdb_count = db_count;
  3572. get_random_bytes(&sbi->s_next_generation, sizeof(u32));
  3573. spin_lock_init(&sbi->s_next_gen_lock);
  3574. setup_timer(&sbi->s_err_report, print_daily_error_info,
  3575. (unsigned long) sb);
  3576. /* Register extent status tree shrinker */
  3577. if (ext4_es_register_shrinker(sbi))
  3578. goto failed_mount3;
  3579. sbi->s_stripe = ext4_get_stripe_size(sbi);
  3580. sbi->s_extent_max_zeroout_kb = 32;
  3581. /*
  3582. * set up enough so that it can read an inode
  3583. */
  3584. sb->s_op = &ext4_sops;
  3585. sb->s_export_op = &ext4_export_ops;
  3586. sb->s_xattr = ext4_xattr_handlers;
  3587. sb->s_cop = &ext4_cryptops;
  3588. #ifdef CONFIG_QUOTA
  3589. sb->dq_op = &ext4_quota_operations;
  3590. if (ext4_has_feature_quota(sb))
  3591. sb->s_qcop = &dquot_quotactl_sysfile_ops;
  3592. else
  3593. sb->s_qcop = &ext4_qctl_operations;
  3594. sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
  3595. #endif
  3596. memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
  3597. INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
  3598. mutex_init(&sbi->s_orphan_lock);
  3599. sb->s_root = NULL;
  3600. needs_recovery = (es->s_last_orphan != 0 ||
  3601. ext4_has_feature_journal_needs_recovery(sb));
  3602. if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
  3603. if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
  3604. goto failed_mount3a;
  3605. /*
  3606. * The first inode we look at is the journal inode. Don't try
  3607. * root first: it may be modified in the journal!
  3608. */
  3609. if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
  3610. err = ext4_load_journal(sb, es, journal_devnum);
  3611. if (err)
  3612. goto failed_mount3a;
  3613. } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
  3614. ext4_has_feature_journal_needs_recovery(sb)) {
  3615. ext4_msg(sb, KERN_ERR, "required journal recovery "
  3616. "suppressed and not mounted read-only");
  3617. goto failed_mount_wq;
  3618. } else {
  3619. /* Nojournal mode, all journal mount options are illegal */
  3620. if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
  3621. ext4_msg(sb, KERN_ERR, "can't mount with "
  3622. "journal_checksum, fs mounted w/o journal");
  3623. goto failed_mount_wq;
  3624. }
  3625. if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
  3626. ext4_msg(sb, KERN_ERR, "can't mount with "
  3627. "journal_async_commit, fs mounted w/o journal");
  3628. goto failed_mount_wq;
  3629. }
  3630. if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
  3631. ext4_msg(sb, KERN_ERR, "can't mount with "
  3632. "commit=%lu, fs mounted w/o journal",
  3633. sbi->s_commit_interval / HZ);
  3634. goto failed_mount_wq;
  3635. }
  3636. if (EXT4_MOUNT_DATA_FLAGS &
  3637. (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
  3638. ext4_msg(sb, KERN_ERR, "can't mount with "
  3639. "data=, fs mounted w/o journal");
  3640. goto failed_mount_wq;
  3641. }
3642. sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
  3643. clear_opt(sb, JOURNAL_CHECKSUM);
  3644. clear_opt(sb, DATA_FLAGS);
  3645. sbi->s_journal = NULL;
  3646. needs_recovery = 0;
  3647. goto no_journal;
  3648. }
  3649. if (ext4_has_feature_64bit(sb) &&
  3650. !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
  3651. JBD2_FEATURE_INCOMPAT_64BIT)) {
  3652. ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
  3653. goto failed_mount_wq;
  3654. }
  3655. if (!set_journal_csum_feature_set(sb)) {
  3656. ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
  3657. "feature set");
  3658. goto failed_mount_wq;
  3659. }
  3660. /* We have now updated the journal if required, so we can
  3661. * validate the data journaling mode. */
  3662. switch (test_opt(sb, DATA_FLAGS)) {
  3663. case 0:
  3664. /* No mode set, assume a default based on the journal
  3665. * capabilities: ORDERED_DATA if the journal can
  3666. * cope, else JOURNAL_DATA
  3667. */
  3668. if (jbd2_journal_check_available_features
  3669. (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
  3670. set_opt(sb, ORDERED_DATA);
  3671. else
  3672. set_opt(sb, JOURNAL_DATA);
  3673. break;
  3674. case EXT4_MOUNT_ORDERED_DATA:
  3675. case EXT4_MOUNT_WRITEBACK_DATA:
  3676. if (!jbd2_journal_check_available_features
  3677. (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
  3678. ext4_msg(sb, KERN_ERR, "Journal does not support "
  3679. "requested data journaling mode");
  3680. goto failed_mount_wq;
  3681. }
  3682. default:
  3683. break;
  3684. }
  3685. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
  3686. test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
  3687. ext4_msg(sb, KERN_ERR, "can't mount with "
  3688. "journal_async_commit in data=ordered mode");
  3689. goto failed_mount_wq;
  3690. }
  3691. set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
  3692. sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
  3693. no_journal:
  3694. if (!test_opt(sb, NO_MBCACHE)) {
  3695. sbi->s_ea_block_cache = ext4_xattr_create_cache();
  3696. if (!sbi->s_ea_block_cache) {
  3697. ext4_msg(sb, KERN_ERR,
  3698. "Failed to create ea_block_cache");
  3699. goto failed_mount_wq;
  3700. }
  3701. if (ext4_has_feature_ea_inode(sb)) {
  3702. sbi->s_ea_inode_cache = ext4_xattr_create_cache();
  3703. if (!sbi->s_ea_inode_cache) {
  3704. ext4_msg(sb, KERN_ERR,
  3705. "Failed to create ea_inode_cache");
  3706. goto failed_mount_wq;
  3707. }
  3708. }
  3709. }
  3710. if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
  3711. (blocksize != PAGE_SIZE)) {
  3712. ext4_msg(sb, KERN_ERR,
  3713. "Unsupported blocksize for fs encryption");
  3714. goto failed_mount_wq;
  3715. }
  3716. if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
  3717. !ext4_has_feature_encrypt(sb)) {
  3718. ext4_set_feature_encrypt(sb);
  3719. ext4_commit_super(sb, 1);
  3720. }
  3721. /*
  3722. * Get the # of file system overhead blocks from the
  3723. * superblock if present.
  3724. */
  3725. if (es->s_overhead_clusters)
  3726. sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
  3727. else {
  3728. err = ext4_calculate_overhead(sb);
  3729. if (err)
  3730. goto failed_mount_wq;
  3731. }
  3732. /*
  3733. * The maximum number of concurrent works can be high and
  3734. * concurrency isn't really necessary. Limit it to 1.
  3735. */
  3736. EXT4_SB(sb)->rsv_conversion_wq =
  3737. alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
  3738. if (!EXT4_SB(sb)->rsv_conversion_wq) {
  3739. printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
  3740. ret = -ENOMEM;
  3741. goto failed_mount4;
  3742. }
  3743. /*
  3744. * The jbd2_journal_load will have done any necessary log recovery,
  3745. * so we can safely mount the rest of the filesystem now.
  3746. */
  3747. root = ext4_iget(sb, EXT4_ROOT_INO);
  3748. if (IS_ERR(root)) {
  3749. ext4_msg(sb, KERN_ERR, "get root inode failed");
  3750. ret = PTR_ERR(root);
  3751. root = NULL;
  3752. goto failed_mount4;
  3753. }
  3754. if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
  3755. ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
  3756. iput(root);
  3757. goto failed_mount4;
  3758. }
  3759. sb->s_root = d_make_root(root);
  3760. if (!sb->s_root) {
  3761. ext4_msg(sb, KERN_ERR, "get root dentry failed");
  3762. ret = -ENOMEM;
  3763. goto failed_mount4;
  3764. }
  3765. if (ext4_setup_super(sb, es, sb_rdonly(sb)))
  3766. sb->s_flags |= MS_RDONLY;
  3767. /* determine the minimum size of new large inodes, if present */
  3768. if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
  3769. sbi->s_want_extra_isize == 0) {
  3770. sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
  3771. EXT4_GOOD_OLD_INODE_SIZE;
  3772. if (ext4_has_feature_extra_isize(sb)) {
  3773. if (sbi->s_want_extra_isize <
  3774. le16_to_cpu(es->s_want_extra_isize))
  3775. sbi->s_want_extra_isize =
  3776. le16_to_cpu(es->s_want_extra_isize);
  3777. if (sbi->s_want_extra_isize <
  3778. le16_to_cpu(es->s_min_extra_isize))
  3779. sbi->s_want_extra_isize =
  3780. le16_to_cpu(es->s_min_extra_isize);
  3781. }
  3782. }
  3783. /* Check if enough inode space is available */
  3784. if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
  3785. sbi->s_inode_size) {
  3786. sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
  3787. EXT4_GOOD_OLD_INODE_SIZE;
3788. ext4_msg(sb, KERN_INFO, "required extra inode space not "
3789. "available");
  3790. }
	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "unable to initialize "
				 "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;
cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now. We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}
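
/*
 * Open the on-disk inode that holds the journal (the journal_inum
 * recorded in the superblock) and sanity-check it before jbd2 is
 * pointed at it.  Returns the inode, or NULL if it is missing,
 * deleted or not a regular file.
 */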
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk. Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}
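
/*
 * Build a jbd2 journal from the journal inode stored inside the
 * filesystem itself (as opposed to an external journal device).
 */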
static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}
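
/*
 * Open an external journal block device, validate its superblock and
 * UUID against this filesystem, and initialise a jbd2 journal on it.
 */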
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			 "blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
			 "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
			 "bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
			 "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
			 "user (unsupported) - %d",
			 be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}
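
/*
 * Locate the journal (inode or external device) at mount time, load it
 * through jbd2 - replaying it if recovery is needed - and hook it up to
 * the superblock.
 */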
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			 "numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
				 "required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					 "unavailable, cannot proceed");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
				 "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
			 "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}
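
/*
 * Write the in-memory superblock back to disk, refreshing the free
 * block/inode counts and the "kbytes written" statistic on the way out.
 * If @sync is set, wait for the write to complete and report any I/O
 * error.
 */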
static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time. This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & MS_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
			 "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (error)
			return error;

		error = buffer_write_io_error(sbh);
		if (error) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
				 "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}
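
/*
 * ->sync_fs() for ext4: flush pending reserved-extent conversions,
 * write back dquots, kick (and optionally wait for) a journal commit,
 * and issue a cache flush to the device when no commit will do it for
 * us.
 */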
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o a journal transaction, so a barrier
	 * must be sent at the end of the function.  But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem to a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};
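
/*
 * Handle "mount -o remount": re-parse the mount options, refuse the
 * transitions that cannot be done on a live filesystem, and perform the
 * read-only <-> read-write switch (journal replay state, group descriptor
 * checksum verification, orphan list and quota handling included).  On
 * failure the previously saved options are restored.
 */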
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}
	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			 "dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & MS_LAZYTIME)
		sb->s_flags |= MS_LAZYTIME;

	if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & MS_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= MS_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
						 "ext4_remount: Checksum for group %u failed (%u!=%u)",
						 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
						 le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
					 "remount RDWR because of unprocessed "
					 "orphan inode list.  Please "
					 "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if (!ext4_setup_super(sb, es, 0))
				sb->s_flags &= ~MS_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
		ext4_commit_super(sb, 1);

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}

#ifdef CONFIG_QUOTA
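/*
 * Clamp the statfs block and inode figures to the project quota limits
 * of @projid, if any limits are set.
 */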
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
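
/*
 * ->statfs() for ext4: report block and inode usage from the percpu
 * counters, subtracting the filesystem overhead and reserved clusters,
 * and derive f_fsid from the superblock UUID.
 */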
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case only a little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}

#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}
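
/*
 * ->write_dquot(): commit a dirty dquot to the quota file inside a
 * journal handle sized by EXT4_QUOTA_TRANS_BLOCKS.
 */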
static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
				    EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				 "Quota file not on filesystem root. "
				 "Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}
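
/*
 * Enable one quota type from its hidden quota inode (the usr/grp/prj
 * inode numbers stored in the superblock), as used with the "quota"
 * feature.
 */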
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				return err;
			}
		}
	}
	return 0;
}
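
/*
 * ->quota_off(): force delalloc writeback if needed, turn the quota
 * type off, and for old-style (visible) quota files drop the
 * NOATIME/IMMUTABLE flags set by ext4_quota_on() and refresh the file's
 * timestamps.
 */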
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}
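
/*
 * ->get_next_id() is only supported when the underlying quota format
 * implements it; otherwise return -ENOSYS.
 */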
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops *ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}
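
/*
 * Compatibility aliases: when CONFIG_EXT4_USE_FOR_EXT2 is set and no
 * separate ext2 driver is built, ext4 also registers itself as "ext2";
 * it always registers as "ext3".  The ext2_feature_set_ok() and
 * ext3_feature_set_ok() helpers check that a filesystem mounted through
 * those names uses no features the original drivers would not
 * understand.
 */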
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
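
/*
 * Module init: set up the shared caches, sysfs entries and helper
 * subsystems, then register the ext4 (and compat ext2/ext3) filesystem
 * types.  ext4_exit_fs() tears everything down in reverse order.
 */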
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)