
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"       /* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
                                   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
                                 const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
                                            unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   transaction start -> i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) -> mmap_sem
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) ->
 *   transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ext2",
        .mount          = ext4_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif

static struct file_system_type ext3_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ext3",
        .mount          = ext4_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
                                 struct ext4_super_block *es)
{
        if (!ext4_has_feature_metadata_csum(sb))
                return 1;

        return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
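
/*
 * The superblock checksum is computed over every byte of the on-disk
 * superblock up to (but not including) the s_checksum field itself,
 * using crc32c seeded with ~0, as enforced by the EXT4_CRC32C_CHKSUM
 * check above.
 */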
static __le32 ext4_superblock_csum(struct super_block *sb,
                                   struct ext4_super_block *es)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int offset = offsetof(struct ext4_super_block, s_checksum);
        __u32 csum;

        csum = ext4_chksum(sbi, ~0, (char *)es, offset);

        return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
                                       struct ext4_super_block *es)
{
        if (!ext4_has_metadata_csum(sb))
                return 1;

        return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (!ext4_has_metadata_csum(sb))
                return;

        es->s_checksum = ext4_superblock_csum(sb, es);
}
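
/*
 * Allocation helpers: try kmalloc()/kzalloc() first (suppressing the
 * allocation-failure warning), and fall back to vmalloc() when
 * physically contiguous memory is unavailable, e.g. for large or
 * fragmentation-prone allocations.
 */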
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
        void *ret;

        ret = kmalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags, PAGE_KERNEL);
        return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
        void *ret;

        ret = kzalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
        return ret;
}
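
/*
 * Group descriptor accessors.  When the descriptor size is at least
 * EXT4_MIN_DESC_SIZE_64BIT, each on-disk field is split into _lo and
 * _hi halves; these helpers stitch the halves back together into a
 * full-width value.
 */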
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_block_bitmap_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_inode_bitmap_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
                              struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_inode_table_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_free_blocks_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
                             struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_free_inodes_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
                           struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_used_dirs_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_itable_unused_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}
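
/*
 * The corresponding setters: store the low half unconditionally, and
 * the high half only when the larger 64-bit descriptor fields are
 * present.
 */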
void ext4_block_bitmap_set(struct super_block *sb,
                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
                          struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
                                  struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
                          struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
                        struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
                            struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
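
/*
 * Record the most recent error (function, line, timestamp) in the
 * on-disk superblock, preserving the very first error ever seen, and
 * arm the daily error-reporting timer on the first occurrence.
 */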
static void __save_error_info(struct super_block *sb, const char *func,
                              unsigned int line)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
        if (bdev_read_only(sb->s_bdev))
                return;
        es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
        es->s_last_error_time = cpu_to_le32(get_seconds());
        strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
        es->s_last_error_line = cpu_to_le32(line);
        if (!es->s_first_error_time) {
                es->s_first_error_time = es->s_last_error_time;
                strncpy(es->s_first_error_func, func,
                        sizeof(es->s_first_error_func));
                es->s_first_error_line = cpu_to_le32(line);
                es->s_first_error_ino = es->s_last_error_ino;
                es->s_first_error_block = es->s_last_error_block;
        }
        /*
         * Start the daily error reporting function if it hasn't been
         * started already
         */
        if (!es->s_error_count)
                mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
        le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
                            unsigned int line)
{
        __save_error_info(sb, func, line);
        ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super) will cause a kernel OOPS.
 * This is a kludge to prevent these oopses until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
        struct inode *bd_inode = sb->s_bdev->bd_inode;
        struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

        return bdi->dev == NULL;
}
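
/*
 * Invoke the callbacks queued on the committing transaction's
 * t_private_list.  s_md_lock protects the list itself but is dropped
 * around each jce_func() invocation, so the callbacks do not run under
 * the spinlock.
 */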
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
        struct super_block *sb = journal->j_private;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int error = is_journal_aborted(journal);
        struct ext4_journal_cb_entry *jce;

        BUG_ON(txn->t_state == T_FINISHED);

        ext4_process_freed_data(sb, txn->t_tid);

        spin_lock(&sbi->s_md_lock);
        while (!list_empty(&txn->t_private_list)) {
                jce = list_entry(txn->t_private_list.next,
                                 struct ext4_journal_cb_entry, jce_list);
                list_del_init(&jce->jce_list);
                spin_unlock(&sbi->s_md_lock);
                jce->jce_func(sb, jce, error);
                spin_lock(&sbi->s_md_lock);
        }
        spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */
static void ext4_handle_error(struct super_block *sb)
{
        if (sb_rdonly(sb))
                return;

        if (!test_opt(sb, ERRORS_CONT)) {
                journal_t *journal = EXT4_SB(sb)->s_journal;

                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                if (journal)
                        jbd2_journal_abort(journal, -EIO);
        }
        if (test_opt(sb, ERRORS_RO)) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= SB_RDONLY;
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
                panic("EXT4-fs (device %s): panic forced after error\n",
                      sb->s_id);
        }
}
#define ext4_error_ratelimit(sb)                                        \
                ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),     \
                             "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT
                       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
                       sb->s_id, function, line, current->comm, &vaf);
                va_end(args);
        }
        save_error_info(sb, function, line);
        ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
                        unsigned int line, ext4_fsblk_t block,
                        const char *fmt, ...)
{
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return;

        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        es->s_last_error_block = cpu_to_le64(block);
        if (ext4_error_ratelimit(inode->i_sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                if (block)
                        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
                               "inode #%lu: block %llu: comm %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               block, current->comm, &vaf);
                else
                        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
                               "inode #%lu: comm %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               current->comm, &vaf);
                va_end(args);
        }
        save_error_info(inode->i_sb, function, line);
        ext4_handle_error(inode->i_sb);
}
void __ext4_error_file(struct file *file, const char *function,
                       unsigned int line, ext4_fsblk_t block,
                       const char *fmt, ...)
{
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es;
        struct inode *inode = file_inode(file);
        char pathname[80], *path;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return;

        es = EXT4_SB(inode->i_sb)->s_es;
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        if (ext4_error_ratelimit(inode->i_sb)) {
                path = file_path(file, pathname, sizeof(pathname));
                if (IS_ERR(path))
                        path = "(unknown)";
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                if (block)
                        printk(KERN_CRIT
                               "EXT4-fs error (device %s): %s:%d: inode #%lu: "
                               "block %llu: comm %s: path %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               block, current->comm, path, &vaf);
                else
                        printk(KERN_CRIT
                               "EXT4-fs error (device %s): %s:%d: inode #%lu: "
                               "comm %s: path %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               current->comm, path, &vaf);
                va_end(args);
        }
        save_error_info(inode->i_sb, function, line);
        ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
                              char nbuf[16])
{
        char *errstr = NULL;

        switch (errno) {
        case -EFSCORRUPTED:
                errstr = "Corrupt filesystem";
                break;
        case -EFSBADCRC:
                errstr = "Filesystem failed CRC";
                break;
        case -EIO:
                errstr = "IO failure";
                break;
        case -ENOMEM:
                errstr = "Out of memory";
                break;
        case -EROFS:
                if (!sb || (EXT4_SB(sb)->s_journal &&
                            EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
                        errstr = "Journal has aborted";
                else
                        errstr = "Readonly filesystem";
                break;
        default:
                /* If the caller passed in an extra buffer for unknown
                 * errors, textualise them now.  Else we just return
                 * NULL. */
                if (nbuf) {
                        /* Check for truncated error codes... */
                        if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
                                errstr = nbuf;
                }
                break;
        }

        return errstr;
}
/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */
void __ext4_std_error(struct super_block *sb, const char *function,
                      unsigned int line, int errno)
{
        char nbuf[16];
        const char *errstr;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        /* Special case: if the error is EROFS, and we're not already
         * inside a transaction, then there's really no point in logging
         * an error. */
        if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
                return;

        if (ext4_error_ratelimit(sb)) {
                errstr = ext4_decode_error(sb, errno, nbuf);
                printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
                       sb->s_id, function, line, errstr);
        }

        save_error_info(sb, function, line);
        ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic, in which
 * case we take the easy way out and panic immediately.
 */
void __ext4_abort(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        save_error_info(sb, function, line);
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
               sb->s_id, function, line, &vaf);
        va_end(args);

        if (sb_rdonly(sb) == 0) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= SB_RDONLY;
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
                panic("EXT4-fs panic from previous error\n");
        }
}
void __ext4_msg(struct super_block *sb,
                const char *prefix, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
        va_end(args);
}

#define ext4_warning_ratelimit(sb)                                      \
                ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \
                             "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
                    unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
               sb->s_id, function, line, &vaf);
        va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
                          unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(inode->i_sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
               "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
               function, line, inode->i_ino, current->comm, &vaf);
        va_end(args);
}
void __ext4_grp_locked_error(const char *function, unsigned int line,
                             struct super_block *sb, ext4_group_t grp,
                             unsigned long ino, ext4_fsblk_t block,
                             const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
        struct va_format vaf;
        va_list args;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        es->s_last_error_ino = cpu_to_le32(ino);
        es->s_last_error_block = cpu_to_le64(block);
        __save_error_info(sb, function, line);

        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
                       sb->s_id, function, line, grp);
                if (ino)
                        printk(KERN_CONT "inode %lu: ", ino);
                if (block)
                        printk(KERN_CONT "block %llu:",
                               (unsigned long long) block);
                printk(KERN_CONT "%pV\n", &vaf);
                va_end(args);
        }

        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
                return;
        }

        ext4_unlock_group(sb, grp);
        ext4_commit_super(sb, 1);
        ext4_handle_error(sb);
        /*
         * We only get here in the ERRORS_RO case; relocking the group
         * may be dangerous, but nothing bad will happen since the
         * filesystem will have already been marked read-only and the
         * journal has been aborted.  We return 1 as a hint to callers
         * who might want to use the return value from
         * ext4_grp_locked_error() to distinguish between the
         * ERRORS_CONT and ERRORS_RO case, and perhaps return more
         * aggressively from the ext4 function in question, with a
         * more appropriate error code.
         */
        ext4_lock_group(sb, grp);
        return;
}
void ext4_update_dynamic_rev(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
                return;

        ext4_warning(sb,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT4_DYNAMIC_REV);

        es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
        es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
        es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
        /* leave es->s_feature_*compat flags alone */
        /* es->s_uuid will be set by e2fsck if empty */

        /*
         * The rest of the superblock fields should be zero, and if not it
         * means they are likely already in use, so leave them alone.  We
         * can leave it up to e2fsck to clean up any inconsistencies there.
         */
}
/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
        struct block_device *bdev;
        char b[BDEVNAME_SIZE];

        bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
        if (IS_ERR(bdev))
                goto fail;
        return bdev;

fail:
        ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
                 __bdevname(dev, b), PTR_ERR(bdev));
        return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
        struct block_device *bdev;

        bdev = sbi->journal_bdev;
        if (bdev) {
                ext4_blkdev_put(bdev);
                sbi->journal_bdev = NULL;
        }
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
        return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}
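
/* Dump the in-memory orphan inode list, for debugging unmount problems. */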
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
        struct list_head *l;

        ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
                 le32_to_cpu(sbi->s_es->s_last_orphan));

        printk(KERN_ERR "sb_info orphan list:\n");
        list_for_each(l, &sbi->s_orphan) {
                struct inode *inode = orphan_list_entry(l);
                printk(KERN_ERR "  "
                       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
                       inode->i_sb->s_id, inode->i_ino, inode,
                       inode->i_mode, inode->i_nlink,
                       NEXT_ORPHAN(inode));
        }
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
        int type;

        /* Use our quota_off function to clear inode flags etc. */
        for (type = 0; type < EXT4_MAXQUOTAS; type++)
                ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif
static void ext4_put_super(struct super_block *sb)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        int aborted = 0;
        int i, err;

        ext4_unregister_li_request(sb);
        ext4_quota_off_umount(sb);

        destroy_workqueue(sbi->rsv_conversion_wq);

        if (sbi->s_journal) {
                aborted = is_journal_aborted(sbi->s_journal);
                err = jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
                if ((err < 0) && !aborted)
                        ext4_abort(sb, "Couldn't clean up the journal");
        }

        ext4_unregister_sysfs(sb);
        ext4_es_unregister_shrinker(sbi);
        del_timer_sync(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
        ext4_ext_release(sb);

        if (!sb_rdonly(sb) && !aborted) {
                ext4_clear_feature_journal_needs_recovery(sb);
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
        if (!sb_rdonly(sb))
                ext4_commit_super(sb, 1);

        for (i = 0; i < sbi->s_gdb_count; i++)
                brelse(sbi->s_group_desc[i]);
        kvfree(sbi->s_group_desc);
        kvfree(sbi->s_flex_groups);
        percpu_counter_destroy(&sbi->s_freeclusters_counter);
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
        percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                kfree(sbi->s_qf_names[i]);
#endif

        /* Debugging code just in case the in-memory inode orphan list
         * isn't empty.  The on-disk one can be non-empty if we've
         * detected an error and taken the fs readonly, but the
         * in-memory list had better be clean by this point. */
        if (!list_empty(&sbi->s_orphan))
                dump_orphan_list(sb, sbi);
        J_ASSERT(list_empty(&sbi->s_orphan));

        sync_blockdev(sb->s_bdev);
        invalidate_bdev(sb->s_bdev);
        if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
                /*
                 * Invalidate the journal device's buffers.  We don't want them
                 * floating about in memory - the physical journal device may
                 * be hotswapped, and it breaks the `ro-after' testing code.
                 */
                sync_blockdev(sbi->journal_bdev);
                invalidate_bdev(sbi->journal_bdev);
                ext4_blkdev_remove(sbi);
        }
        if (sbi->s_ea_inode_cache) {
                ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
                sbi->s_ea_inode_cache = NULL;
        }
        if (sbi->s_ea_block_cache) {
                ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
                sbi->s_ea_block_cache = NULL;
        }
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
        brelse(sbi->s_sbh);
        sb->s_fs_info = NULL;
        /*
         * Now that we are completely done shutting down the
         * superblock, we need to actually destroy the kobject.
         */
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi->s_blockgroup_lock);
        fs_put_dax(sbi->s_daxdev);
        kfree(sbi);
}
static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
        struct ext4_inode_info *ei;

        ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;

        inode_set_iversion(&ei->vfs_inode, 1);
        spin_lock_init(&ei->i_raw_lock);
        INIT_LIST_HEAD(&ei->i_prealloc_list);
        spin_lock_init(&ei->i_prealloc_lock);
        ext4_es_init_tree(&ei->i_es_tree);
        rwlock_init(&ei->i_es_lock);
        INIT_LIST_HEAD(&ei->i_es_list);
        ei->i_es_all_nr = 0;
        ei->i_es_shk_nr = 0;
        ei->i_es_shrink_lblk = 0;
        ei->i_reserved_data_blocks = 0;
        ei->i_da_metadata_calc_len = 0;
        ei->i_da_metadata_calc_last_lblock = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
        memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
        ei->jinode = NULL;
        INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
        spin_lock_init(&ei->i_completed_io_lock);
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_unwritten, 0);
        INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
        return &ei->vfs_inode;
}
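
/*
 * ->drop_inode: defer entirely to generic_drop_inode(); this wrapper
 * exists only to emit the ext4_drop_inode tracepoint.
 */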
static int ext4_drop_inode(struct inode *inode)
{
        int drop = generic_drop_inode(inode);

        trace_ext4_drop_inode(inode, drop);
        return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
        if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
                ext4_msg(inode->i_sb, KERN_ERR,
                         "Inode %lu (%p): orphan list check failed!",
                         inode->i_ino, EXT4_I(inode));
                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
                               EXT4_I(inode), sizeof(struct ext4_inode_info),
                               true);
                dump_stack();
        }
        call_rcu(&inode->i_rcu, ext4_i_callback);
}
static void init_once(void *foo)
{
        struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

        INIT_LIST_HEAD(&ei->i_orphan);
        init_rwsem(&ei->xattr_sem);
        init_rwsem(&ei->i_data_sem);
        init_rwsem(&ei->i_mmap_sem);
        inode_init_once(&ei->vfs_inode);
}
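
/*
 * Create the inode slab cache.  The kmem_cache_create_usercopy()
 * whitelist covers only the i_data array, which is the region the
 * hardened usercopy checks permit for direct copies to or from user
 * space.
 */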
  910. static int __init init_inodecache(void)
  911. {
  912. ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
  913. sizeof(struct ext4_inode_info), 0,
  914. (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
  915. SLAB_ACCOUNT),
  916. offsetof(struct ext4_inode_info, i_data),
  917. sizeof_field(struct ext4_inode_info, i_data),
  918. init_once);
  919. if (ext4_inode_cachep == NULL)
  920. return -ENOMEM;
  921. return 0;
  922. }
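/*
 * Editorial sketch (not part of the original source): the usercopy
 * whitelist passed to kmem_cache_create_usercopy() above restricts
 * hardened-usercopy to the i_data region of each cached inode. The
 * same pattern on a hypothetical cache -- "foo", "foo_cachep" and the
 * struct fields are assumptions for illustration only -- would be:
 *
 *	struct foo {
 *		spinlock_t lock;	// must never reach userspace
 *		char payload[64];	// the only usercopy-able bytes
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo_cache",
 *			sizeof(struct foo), 0, SLAB_ACCOUNT,
 *			offsetof(struct foo, payload),
 *			sizeof_field(struct foo, payload),
 *			NULL);
 *
 * With CONFIG_HARDENED_USERCOPY, copying cache memory outside
 * [useroffset, useroffset + usersize) to or from userspace is then
 * reported instead of silently exposing adjacent fields.
 */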
  923. static void destroy_inodecache(void)
  924. {
  925. /*
  926. * Make sure all delayed rcu free inodes are flushed before we
  927. * destroy cache.
  928. */
  929. rcu_barrier();
  930. kmem_cache_destroy(ext4_inode_cachep);
  931. }
  932. void ext4_clear_inode(struct inode *inode)
  933. {
  934. invalidate_inode_buffers(inode);
  935. clear_inode(inode);
  936. dquot_drop(inode);
  937. ext4_discard_preallocations(inode);
  938. ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
  939. if (EXT4_I(inode)->jinode) {
  940. jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
  941. EXT4_I(inode)->jinode);
  942. jbd2_free_inode(EXT4_I(inode)->jinode);
  943. EXT4_I(inode)->jinode = NULL;
  944. }
  945. fscrypt_put_encryption_info(inode);
  946. }
  947. static struct inode *ext4_nfs_get_inode(struct super_block *sb,
  948. u64 ino, u32 generation)
  949. {
  950. struct inode *inode;
  951. if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
  952. return ERR_PTR(-ESTALE);
  953. if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
  954. return ERR_PTR(-ESTALE);
955. /* iget isn't really right if the inode is currently unallocated!!
956. *
957. * ext4_read_inode will return a bad_inode if the inode had been
958. * deleted, so we should be safe.
959. *
960. * Currently we don't know the generation for the parent directory,
961. * so a generation of 0 means "accept any".
962. */
  963. inode = ext4_iget_normal(sb, ino);
  964. if (IS_ERR(inode))
  965. return ERR_CAST(inode);
  966. if (generation && inode->i_generation != generation) {
  967. iput(inode);
  968. return ERR_PTR(-ESTALE);
  969. }
  970. return inode;
  971. }
  972. static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
  973. int fh_len, int fh_type)
  974. {
  975. return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
  976. ext4_nfs_get_inode);
  977. }
  978. static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
  979. int fh_len, int fh_type)
  980. {
  981. return generic_fh_to_parent(sb, fid, fh_len, fh_type,
  982. ext4_nfs_get_inode);
  983. }
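/*
 * Editorial example: for a FILEID_INO32_GEN file handle, the generic
 * helpers above decode fid->i32.ino and fid->i32.gen and hand them to
 * ext4_nfs_get_inode(), so a stale handle -- a wrong generation, or an
 * inode number outside [EXT4_FIRST_INO, s_inodes_count] -- is reported
 * to the NFS client as ESTALE rather than resolving to an unrelated,
 * possibly reallocated inode.
 */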
  984. /*
  985. * Try to release metadata pages (indirect blocks, directories) which are
  986. * mapped via the block device. Since these pages could have journal heads
  987. * which would prevent try_to_free_buffers() from freeing them, we must use
  988. * jbd2 layer's try_to_free_buffers() function to release them.
  989. */
  990. static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
  991. gfp_t wait)
  992. {
  993. journal_t *journal = EXT4_SB(sb)->s_journal;
  994. WARN_ON(PageChecked(page));
  995. if (!page_has_buffers(page))
  996. return 0;
  997. if (journal)
  998. return jbd2_journal_try_to_free_buffers(journal, page,
  999. wait & ~__GFP_DIRECT_RECLAIM);
  1000. return try_to_free_buffers(page);
  1001. }
  1002. #ifdef CONFIG_EXT4_FS_ENCRYPTION
  1003. static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
  1004. {
  1005. return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
  1006. EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
  1007. }
  1008. static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
  1009. void *fs_data)
  1010. {
  1011. handle_t *handle = fs_data;
  1012. int res, res2, credits, retries = 0;
  1013. /*
  1014. * Encrypting the root directory is not allowed because e2fsck expects
  1015. * lost+found to exist and be unencrypted, and encrypting the root
  1016. * directory would imply encrypting the lost+found directory as well as
  1017. * the filename "lost+found" itself.
  1018. */
  1019. if (inode->i_ino == EXT4_ROOT_INO)
  1020. return -EPERM;
  1021. if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
  1022. return -EINVAL;
  1023. res = ext4_convert_inline_data(inode);
  1024. if (res)
  1025. return res;
  1026. /*
  1027. * If a journal handle was specified, then the encryption context is
  1028. * being set on a new inode via inheritance and is part of a larger
  1029. * transaction to create the inode. Otherwise the encryption context is
  1030. * being set on an existing inode in its own transaction. Only in the
  1031. * latter case should the "retry on ENOSPC" logic be used.
  1032. */
  1033. if (handle) {
  1034. res = ext4_xattr_set_handle(handle, inode,
  1035. EXT4_XATTR_INDEX_ENCRYPTION,
  1036. EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
  1037. ctx, len, 0);
  1038. if (!res) {
  1039. ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
  1040. ext4_clear_inode_state(inode,
  1041. EXT4_STATE_MAY_INLINE_DATA);
  1042. /*
  1043. * Update inode->i_flags - S_ENCRYPTED will be enabled,
  1044. * S_DAX may be disabled
  1045. */
  1046. ext4_set_inode_flags(inode);
  1047. }
  1048. return res;
  1049. }
  1050. res = dquot_initialize(inode);
  1051. if (res)
  1052. return res;
  1053. retry:
  1054. res = ext4_xattr_set_credits(inode, len, false /* is_create */,
  1055. &credits);
  1056. if (res)
  1057. return res;
  1058. handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
  1059. if (IS_ERR(handle))
  1060. return PTR_ERR(handle);
  1061. res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
  1062. EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
  1063. ctx, len, 0);
  1064. if (!res) {
  1065. ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
  1066. /*
  1067. * Update inode->i_flags - S_ENCRYPTED will be enabled,
  1068. * S_DAX may be disabled
  1069. */
  1070. ext4_set_inode_flags(inode);
  1071. res = ext4_mark_inode_dirty(handle, inode);
  1072. if (res)
  1073. EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
  1074. }
  1075. res2 = ext4_journal_stop(handle);
  1076. if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
  1077. goto retry;
  1078. if (!res)
  1079. res = res2;
  1080. return res;
  1081. }
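/*
 * Editorial note: the tail of ext4_set_context() above is an instance
 * of ext4's usual "retry on ENOSPC" transaction pattern. A minimal
 * sketch of that pattern (error handling trimmed for illustration):
 *
 *	int retries = 0;
 *	retry:
 *	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
 *	// ... journalled modification, result in res ...
 *	res2 = ext4_journal_stop(handle);
 *	if (res == -ENOSPC &&
 *	    ext4_should_retry_alloc(inode->i_sb, &retries))
 *		goto retry;
 *
 * ext4_should_retry_alloc() forces a journal commit, which can release
 * space still held by the committing transaction, and bounds the
 * number of retries, so a transient ENOSPC need not fail the operation.
 */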
  1082. static bool ext4_dummy_context(struct inode *inode)
  1083. {
  1084. return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
  1085. }
  1086. static unsigned ext4_max_namelen(struct inode *inode)
  1087. {
  1088. return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
  1089. EXT4_NAME_LEN;
  1090. }
  1091. static const struct fscrypt_operations ext4_cryptops = {
  1092. .key_prefix = "ext4:",
  1093. .get_context = ext4_get_context,
  1094. .set_context = ext4_set_context,
  1095. .dummy_context = ext4_dummy_context,
  1096. .empty_dir = ext4_empty_dir,
  1097. .max_namelen = ext4_max_namelen,
  1098. };
  1099. #endif
  1100. #ifdef CONFIG_QUOTA
  1101. static const char * const quotatypes[] = INITQFNAMES;
  1102. #define QTYPE2NAME(t) (quotatypes[t])
  1103. static int ext4_write_dquot(struct dquot *dquot);
  1104. static int ext4_acquire_dquot(struct dquot *dquot);
  1105. static int ext4_release_dquot(struct dquot *dquot);
  1106. static int ext4_mark_dquot_dirty(struct dquot *dquot);
  1107. static int ext4_write_info(struct super_block *sb, int type);
  1108. static int ext4_quota_on(struct super_block *sb, int type, int format_id,
  1109. const struct path *path);
  1110. static int ext4_quota_on_mount(struct super_block *sb, int type);
  1111. static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
  1112. size_t len, loff_t off);
  1113. static ssize_t ext4_quota_write(struct super_block *sb, int type,
  1114. const char *data, size_t len, loff_t off);
  1115. static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
  1116. unsigned int flags);
  1117. static int ext4_enable_quotas(struct super_block *sb);
  1118. static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
  1119. static struct dquot **ext4_get_dquots(struct inode *inode)
  1120. {
  1121. return EXT4_I(inode)->i_dquot;
  1122. }
  1123. static const struct dquot_operations ext4_quota_operations = {
  1124. .get_reserved_space = ext4_get_reserved_space,
  1125. .write_dquot = ext4_write_dquot,
  1126. .acquire_dquot = ext4_acquire_dquot,
  1127. .release_dquot = ext4_release_dquot,
  1128. .mark_dirty = ext4_mark_dquot_dirty,
  1129. .write_info = ext4_write_info,
  1130. .alloc_dquot = dquot_alloc,
  1131. .destroy_dquot = dquot_destroy,
  1132. .get_projid = ext4_get_projid,
  1133. .get_inode_usage = ext4_get_inode_usage,
  1134. .get_next_id = ext4_get_next_id,
  1135. };
  1136. static const struct quotactl_ops ext4_qctl_operations = {
  1137. .quota_on = ext4_quota_on,
  1138. .quota_off = ext4_quota_off,
  1139. .quota_sync = dquot_quota_sync,
  1140. .get_state = dquot_get_state,
  1141. .set_info = dquot_set_dqinfo,
  1142. .get_dqblk = dquot_get_dqblk,
  1143. .set_dqblk = dquot_set_dqblk,
  1144. .get_nextdqblk = dquot_get_next_dqblk,
  1145. };
  1146. #endif
  1147. static const struct super_operations ext4_sops = {
  1148. .alloc_inode = ext4_alloc_inode,
  1149. .destroy_inode = ext4_destroy_inode,
  1150. .write_inode = ext4_write_inode,
  1151. .dirty_inode = ext4_dirty_inode,
  1152. .drop_inode = ext4_drop_inode,
  1153. .evict_inode = ext4_evict_inode,
  1154. .put_super = ext4_put_super,
  1155. .sync_fs = ext4_sync_fs,
  1156. .freeze_fs = ext4_freeze,
  1157. .unfreeze_fs = ext4_unfreeze,
  1158. .statfs = ext4_statfs,
  1159. .remount_fs = ext4_remount,
  1160. .show_options = ext4_show_options,
  1161. #ifdef CONFIG_QUOTA
  1162. .quota_read = ext4_quota_read,
  1163. .quota_write = ext4_quota_write,
  1164. .get_dquots = ext4_get_dquots,
  1165. #endif
  1166. .bdev_try_to_free_page = bdev_try_to_free_page,
  1167. };
  1168. static const struct export_operations ext4_export_ops = {
  1169. .fh_to_dentry = ext4_fh_to_dentry,
  1170. .fh_to_parent = ext4_fh_to_parent,
  1171. .get_parent = ext4_get_parent,
  1172. };
  1173. enum {
  1174. Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
  1175. Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
  1176. Opt_nouid32, Opt_debug, Opt_removed,
  1177. Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
  1178. Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
  1179. Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
  1180. Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
  1181. Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
  1182. Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
  1183. Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
  1184. Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
  1185. Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
  1186. Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
  1187. Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
  1188. Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
  1189. Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
  1190. Opt_inode_readahead_blks, Opt_journal_ioprio,
  1191. Opt_dioread_nolock, Opt_dioread_lock,
  1192. Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
  1193. Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
  1194. };
  1195. static const match_table_t tokens = {
  1196. {Opt_bsd_df, "bsddf"},
  1197. {Opt_minix_df, "minixdf"},
  1198. {Opt_grpid, "grpid"},
  1199. {Opt_grpid, "bsdgroups"},
  1200. {Opt_nogrpid, "nogrpid"},
  1201. {Opt_nogrpid, "sysvgroups"},
  1202. {Opt_resgid, "resgid=%u"},
  1203. {Opt_resuid, "resuid=%u"},
  1204. {Opt_sb, "sb=%u"},
  1205. {Opt_err_cont, "errors=continue"},
  1206. {Opt_err_panic, "errors=panic"},
  1207. {Opt_err_ro, "errors=remount-ro"},
  1208. {Opt_nouid32, "nouid32"},
  1209. {Opt_debug, "debug"},
  1210. {Opt_removed, "oldalloc"},
  1211. {Opt_removed, "orlov"},
  1212. {Opt_user_xattr, "user_xattr"},
  1213. {Opt_nouser_xattr, "nouser_xattr"},
  1214. {Opt_acl, "acl"},
  1215. {Opt_noacl, "noacl"},
  1216. {Opt_noload, "norecovery"},
  1217. {Opt_noload, "noload"},
  1218. {Opt_removed, "nobh"},
  1219. {Opt_removed, "bh"},
  1220. {Opt_commit, "commit=%u"},
  1221. {Opt_min_batch_time, "min_batch_time=%u"},
  1222. {Opt_max_batch_time, "max_batch_time=%u"},
  1223. {Opt_journal_dev, "journal_dev=%u"},
  1224. {Opt_journal_path, "journal_path=%s"},
  1225. {Opt_journal_checksum, "journal_checksum"},
  1226. {Opt_nojournal_checksum, "nojournal_checksum"},
  1227. {Opt_journal_async_commit, "journal_async_commit"},
  1228. {Opt_abort, "abort"},
  1229. {Opt_data_journal, "data=journal"},
  1230. {Opt_data_ordered, "data=ordered"},
  1231. {Opt_data_writeback, "data=writeback"},
  1232. {Opt_data_err_abort, "data_err=abort"},
  1233. {Opt_data_err_ignore, "data_err=ignore"},
  1234. {Opt_offusrjquota, "usrjquota="},
  1235. {Opt_usrjquota, "usrjquota=%s"},
  1236. {Opt_offgrpjquota, "grpjquota="},
  1237. {Opt_grpjquota, "grpjquota=%s"},
  1238. {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
  1239. {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
  1240. {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
  1241. {Opt_grpquota, "grpquota"},
  1242. {Opt_noquota, "noquota"},
  1243. {Opt_quota, "quota"},
  1244. {Opt_usrquota, "usrquota"},
  1245. {Opt_prjquota, "prjquota"},
  1246. {Opt_barrier, "barrier=%u"},
  1247. {Opt_barrier, "barrier"},
  1248. {Opt_nobarrier, "nobarrier"},
  1249. {Opt_i_version, "i_version"},
  1250. {Opt_dax, "dax"},
  1251. {Opt_stripe, "stripe=%u"},
  1252. {Opt_delalloc, "delalloc"},
  1253. {Opt_lazytime, "lazytime"},
  1254. {Opt_nolazytime, "nolazytime"},
  1255. {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
  1256. {Opt_nodelalloc, "nodelalloc"},
  1257. {Opt_removed, "mblk_io_submit"},
  1258. {Opt_removed, "nomblk_io_submit"},
  1259. {Opt_block_validity, "block_validity"},
  1260. {Opt_noblock_validity, "noblock_validity"},
  1261. {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
  1262. {Opt_journal_ioprio, "journal_ioprio=%u"},
  1263. {Opt_auto_da_alloc, "auto_da_alloc=%u"},
  1264. {Opt_auto_da_alloc, "auto_da_alloc"},
  1265. {Opt_noauto_da_alloc, "noauto_da_alloc"},
  1266. {Opt_dioread_nolock, "dioread_nolock"},
  1267. {Opt_dioread_lock, "dioread_lock"},
  1268. {Opt_discard, "discard"},
  1269. {Opt_nodiscard, "nodiscard"},
  1270. {Opt_init_itable, "init_itable=%u"},
  1271. {Opt_init_itable, "init_itable"},
  1272. {Opt_noinit_itable, "noinit_itable"},
  1273. {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
  1274. {Opt_test_dummy_encryption, "test_dummy_encryption"},
  1275. {Opt_nombcache, "nombcache"},
  1276. {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */
  1277. {Opt_removed, "check=none"}, /* mount option from ext2/3 */
  1278. {Opt_removed, "nocheck"}, /* mount option from ext2/3 */
  1279. {Opt_removed, "reservation"}, /* mount option from ext2/3 */
  1280. {Opt_removed, "noreservation"}, /* mount option from ext2/3 */
  1281. {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */
  1282. {Opt_err, NULL},
  1283. };
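/*
 * Editorial sketch: parse_options() below splits the mount data on
 * commas and classifies each piece against this table. Roughly:
 *
 *	substring_t args[MAX_OPT_ARGS];
 *	char opt[] = "resuid=1000";
 *	int token = match_token(opt, tokens, args);
 *	// token == Opt_resuid; args[0] spans "1000", so
 *	// match_int(&args[0], &arg) would yield arg == 1000
 *
 * Patterns containing %u/%s capture their argument into args[]; bare
 * patterns such as "discard" match the literal string only, and the
 * catch-all {Opt_err, NULL} entry terminates the search.
 */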
  1284. static ext4_fsblk_t get_sb_block(void **data)
  1285. {
  1286. ext4_fsblk_t sb_block;
  1287. char *options = (char *) *data;
  1288. if (!options || strncmp(options, "sb=", 3) != 0)
  1289. return 1; /* Default location */
  1290. options += 3;
  1291. /* TODO: use simple_strtoll with >32bit ext4 */
  1292. sb_block = simple_strtoul(options, &options, 0);
  1293. if (*options && *options != ',') {
  1294. printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
  1295. (char *) *data);
  1296. return 1;
  1297. }
  1298. if (*options == ',')
  1299. options++;
  1300. *data = (void *) options;
  1301. return sb_block;
  1302. }
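/*
 * Worked example (editorial): given mount data "sb=8193,noload",
 * get_sb_block() returns 8193 and advances *data past the comma to
 * "noload", so the remaining options still reach parse_options().
 * Without a leading "sb=", the default superblock location 1 is
 * returned and *data is left untouched.
 */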
  1303. #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
  1304. static const char deprecated_msg[] =
  1305. "Mount option \"%s\" will be removed by %s\n"
  1306. "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
  1307. #ifdef CONFIG_QUOTA
  1308. static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
  1309. {
  1310. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1311. char *qname;
  1312. int ret = -1;
  1313. if (sb_any_quota_loaded(sb) &&
  1314. !sbi->s_qf_names[qtype]) {
  1315. ext4_msg(sb, KERN_ERR,
  1316. "Cannot change journaled "
  1317. "quota options when quota turned on");
  1318. return -1;
  1319. }
  1320. if (ext4_has_feature_quota(sb)) {
  1321. ext4_msg(sb, KERN_INFO, "Journaled quota options "
  1322. "ignored when QUOTA feature is enabled");
  1323. return 1;
  1324. }
  1325. qname = match_strdup(args);
  1326. if (!qname) {
  1327. ext4_msg(sb, KERN_ERR,
  1328. "Not enough memory for storing quotafile name");
  1329. return -1;
  1330. }
  1331. if (sbi->s_qf_names[qtype]) {
  1332. if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
  1333. ret = 1;
  1334. else
  1335. ext4_msg(sb, KERN_ERR,
  1336. "%s quota file already specified",
  1337. QTYPE2NAME(qtype));
  1338. goto errout;
  1339. }
  1340. if (strchr(qname, '/')) {
  1341. ext4_msg(sb, KERN_ERR,
  1342. "quotafile must be on filesystem root");
  1343. goto errout;
  1344. }
  1345. sbi->s_qf_names[qtype] = qname;
  1346. set_opt(sb, QUOTA);
  1347. return 1;
  1348. errout:
  1349. kfree(qname);
  1350. return ret;
  1351. }
  1352. static int clear_qf_name(struct super_block *sb, int qtype)
  1353. {
  1354. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1355. if (sb_any_quota_loaded(sb) &&
  1356. sbi->s_qf_names[qtype]) {
  1357. ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
  1358. " when quota turned on");
  1359. return -1;
  1360. }
  1361. kfree(sbi->s_qf_names[qtype]);
  1362. sbi->s_qf_names[qtype] = NULL;
  1363. return 1;
  1364. }
  1365. #endif
  1366. #define MOPT_SET 0x0001
  1367. #define MOPT_CLEAR 0x0002
  1368. #define MOPT_NOSUPPORT 0x0004
  1369. #define MOPT_EXPLICIT 0x0008
  1370. #define MOPT_CLEAR_ERR 0x0010
  1371. #define MOPT_GTE0 0x0020
  1372. #ifdef CONFIG_QUOTA
  1373. #define MOPT_Q 0
  1374. #define MOPT_QFMT 0x0040
  1375. #else
  1376. #define MOPT_Q MOPT_NOSUPPORT
  1377. #define MOPT_QFMT MOPT_NOSUPPORT
  1378. #endif
  1379. #define MOPT_DATAJ 0x0080
  1380. #define MOPT_NO_EXT2 0x0100
  1381. #define MOPT_NO_EXT3 0x0200
  1382. #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
  1383. #define MOPT_STRING 0x0400
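/*
 * Editorial example of how these flags combine in the table below:
 * {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
 * MOPT_SET | MOPT_Q} means "usrquota" sets both mount bits, but only
 * when CONFIG_QUOTA is built in (otherwise MOPT_Q collapses to
 * MOPT_NOSUPPORT). Likewise, MOPT_EXT4_ONLY entries are rejected when
 * this code is mounted as ext2 or ext3, and MOPT_STRING suppresses
 * the integer parsing of the option argument.
 */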
  1384. static const struct mount_opts {
  1385. int token;
  1386. int mount_opt;
  1387. int flags;
  1388. } ext4_mount_opts[] = {
  1389. {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
  1390. {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
  1391. {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
  1392. {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
  1393. {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
  1394. {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
  1395. {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
  1396. MOPT_EXT4_ONLY | MOPT_SET},
  1397. {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
  1398. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1399. {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
  1400. {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
  1401. {Opt_delalloc, EXT4_MOUNT_DELALLOC,
  1402. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1403. {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
  1404. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1405. {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
  1406. MOPT_EXT4_ONLY | MOPT_CLEAR},
  1407. {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
  1408. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1409. {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
  1410. EXT4_MOUNT_JOURNAL_CHECKSUM),
  1411. MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
  1412. {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
  1413. {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
  1414. {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
  1415. {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
  1416. {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
  1417. MOPT_NO_EXT2},
  1418. {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
  1419. MOPT_NO_EXT2},
  1420. {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
  1421. {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
  1422. {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
  1423. {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
  1424. {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
  1425. {Opt_commit, 0, MOPT_GTE0},
  1426. {Opt_max_batch_time, 0, MOPT_GTE0},
  1427. {Opt_min_batch_time, 0, MOPT_GTE0},
  1428. {Opt_inode_readahead_blks, 0, MOPT_GTE0},
  1429. {Opt_init_itable, 0, MOPT_GTE0},
  1430. {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
  1431. {Opt_stripe, 0, MOPT_GTE0},
  1432. {Opt_resuid, 0, MOPT_GTE0},
  1433. {Opt_resgid, 0, MOPT_GTE0},
  1434. {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
  1435. {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
  1436. {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
  1437. {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
  1438. {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
  1439. {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
  1440. MOPT_NO_EXT2 | MOPT_DATAJ},
  1441. {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
  1442. {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
  1443. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  1444. {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
  1445. {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
  1446. #else
  1447. {Opt_acl, 0, MOPT_NOSUPPORT},
  1448. {Opt_noacl, 0, MOPT_NOSUPPORT},
  1449. #endif
  1450. {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
  1451. {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
  1452. {Opt_debug_want_extra_isize, 0, MOPT_GTE0},
  1453. {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
  1454. {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
  1455. MOPT_SET | MOPT_Q},
  1456. {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
  1457. MOPT_SET | MOPT_Q},
  1458. {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
  1459. MOPT_SET | MOPT_Q},
  1460. {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
  1461. EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
  1462. MOPT_CLEAR | MOPT_Q},
  1463. {Opt_usrjquota, 0, MOPT_Q},
  1464. {Opt_grpjquota, 0, MOPT_Q},
  1465. {Opt_offusrjquota, 0, MOPT_Q},
  1466. {Opt_offgrpjquota, 0, MOPT_Q},
  1467. {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
  1468. {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
  1469. {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
  1470. {Opt_max_dir_size_kb, 0, MOPT_GTE0},
  1471. {Opt_test_dummy_encryption, 0, MOPT_GTE0},
  1472. {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
  1473. {Opt_err, 0, 0}
  1474. };
  1475. static int handle_mount_opt(struct super_block *sb, char *opt, int token,
  1476. substring_t *args, unsigned long *journal_devnum,
  1477. unsigned int *journal_ioprio, int is_remount)
  1478. {
  1479. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1480. const struct mount_opts *m;
  1481. kuid_t uid;
  1482. kgid_t gid;
  1483. int arg = 0;
  1484. #ifdef CONFIG_QUOTA
  1485. if (token == Opt_usrjquota)
  1486. return set_qf_name(sb, USRQUOTA, &args[0]);
  1487. else if (token == Opt_grpjquota)
  1488. return set_qf_name(sb, GRPQUOTA, &args[0]);
  1489. else if (token == Opt_offusrjquota)
  1490. return clear_qf_name(sb, USRQUOTA);
  1491. else if (token == Opt_offgrpjquota)
  1492. return clear_qf_name(sb, GRPQUOTA);
  1493. #endif
  1494. switch (token) {
  1495. case Opt_noacl:
  1496. case Opt_nouser_xattr:
  1497. ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
  1498. break;
  1499. case Opt_sb:
  1500. return 1; /* handled by get_sb_block() */
  1501. case Opt_removed:
  1502. ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
  1503. return 1;
  1504. case Opt_abort:
  1505. sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
  1506. return 1;
  1507. case Opt_i_version:
  1508. sb->s_flags |= SB_I_VERSION;
  1509. return 1;
  1510. case Opt_lazytime:
  1511. sb->s_flags |= SB_LAZYTIME;
  1512. return 1;
  1513. case Opt_nolazytime:
  1514. sb->s_flags &= ~SB_LAZYTIME;
  1515. return 1;
  1516. }
  1517. for (m = ext4_mount_opts; m->token != Opt_err; m++)
  1518. if (token == m->token)
  1519. break;
  1520. if (m->token == Opt_err) {
  1521. ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
  1522. "or missing value", opt);
  1523. return -1;
  1524. }
  1525. if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
  1526. ext4_msg(sb, KERN_ERR,
  1527. "Mount option \"%s\" incompatible with ext2", opt);
  1528. return -1;
  1529. }
  1530. if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
  1531. ext4_msg(sb, KERN_ERR,
  1532. "Mount option \"%s\" incompatible with ext3", opt);
  1533. return -1;
  1534. }
  1535. if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
  1536. return -1;
  1537. if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
  1538. return -1;
  1539. if (m->flags & MOPT_EXPLICIT) {
  1540. if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
  1541. set_opt2(sb, EXPLICIT_DELALLOC);
  1542. } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
  1543. set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
  1544. } else
  1545. return -1;
  1546. }
  1547. if (m->flags & MOPT_CLEAR_ERR)
  1548. clear_opt(sb, ERRORS_MASK);
  1549. if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
  1550. ext4_msg(sb, KERN_ERR, "Cannot change quota "
  1551. "options when quota turned on");
  1552. return -1;
  1553. }
  1554. if (m->flags & MOPT_NOSUPPORT) {
  1555. ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
  1556. } else if (token == Opt_commit) {
  1557. if (arg == 0)
  1558. arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
  1559. sbi->s_commit_interval = HZ * arg;
  1560. } else if (token == Opt_debug_want_extra_isize) {
  1561. sbi->s_want_extra_isize = arg;
  1562. } else if (token == Opt_max_batch_time) {
  1563. sbi->s_max_batch_time = arg;
  1564. } else if (token == Opt_min_batch_time) {
  1565. sbi->s_min_batch_time = arg;
  1566. } else if (token == Opt_inode_readahead_blks) {
  1567. if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
  1568. ext4_msg(sb, KERN_ERR,
  1569. "EXT4-fs: inode_readahead_blks must be "
  1570. "0 or a power of 2 smaller than 2^31");
  1571. return -1;
  1572. }
  1573. sbi->s_inode_readahead_blks = arg;
  1574. } else if (token == Opt_init_itable) {
  1575. set_opt(sb, INIT_INODE_TABLE);
  1576. if (!args->from)
  1577. arg = EXT4_DEF_LI_WAIT_MULT;
  1578. sbi->s_li_wait_mult = arg;
  1579. } else if (token == Opt_max_dir_size_kb) {
  1580. sbi->s_max_dir_size_kb = arg;
  1581. } else if (token == Opt_stripe) {
  1582. sbi->s_stripe = arg;
  1583. } else if (token == Opt_resuid) {
  1584. uid = make_kuid(current_user_ns(), arg);
  1585. if (!uid_valid(uid)) {
  1586. ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
  1587. return -1;
  1588. }
  1589. sbi->s_resuid = uid;
  1590. } else if (token == Opt_resgid) {
  1591. gid = make_kgid(current_user_ns(), arg);
  1592. if (!gid_valid(gid)) {
  1593. ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
  1594. return -1;
  1595. }
  1596. sbi->s_resgid = gid;
  1597. } else if (token == Opt_journal_dev) {
  1598. if (is_remount) {
  1599. ext4_msg(sb, KERN_ERR,
  1600. "Cannot specify journal on remount");
  1601. return -1;
  1602. }
  1603. *journal_devnum = arg;
  1604. } else if (token == Opt_journal_path) {
  1605. char *journal_path;
  1606. struct inode *journal_inode;
  1607. struct path path;
  1608. int error;
  1609. if (is_remount) {
  1610. ext4_msg(sb, KERN_ERR,
  1611. "Cannot specify journal on remount");
  1612. return -1;
  1613. }
  1614. journal_path = match_strdup(&args[0]);
  1615. if (!journal_path) {
  1616. ext4_msg(sb, KERN_ERR, "error: could not dup "
  1617. "journal device string");
  1618. return -1;
  1619. }
  1620. error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
  1621. if (error) {
  1622. ext4_msg(sb, KERN_ERR, "error: could not find "
  1623. "journal device path: error %d", error);
  1624. kfree(journal_path);
  1625. return -1;
  1626. }
  1627. journal_inode = d_inode(path.dentry);
  1628. if (!S_ISBLK(journal_inode->i_mode)) {
  1629. ext4_msg(sb, KERN_ERR, "error: journal path %s "
  1630. "is not a block device", journal_path);
  1631. path_put(&path);
  1632. kfree(journal_path);
  1633. return -1;
  1634. }
  1635. *journal_devnum = new_encode_dev(journal_inode->i_rdev);
  1636. path_put(&path);
  1637. kfree(journal_path);
  1638. } else if (token == Opt_journal_ioprio) {
  1639. if (arg > 7) {
  1640. ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
  1641. " (must be 0-7)");
  1642. return -1;
  1643. }
  1644. *journal_ioprio =
  1645. IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
  1646. } else if (token == Opt_test_dummy_encryption) {
  1647. #ifdef CONFIG_EXT4_FS_ENCRYPTION
  1648. sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
  1649. ext4_msg(sb, KERN_WARNING,
  1650. "Test dummy encryption mode enabled");
  1651. #else
  1652. ext4_msg(sb, KERN_WARNING,
  1653. "Test dummy encryption mount option ignored");
  1654. #endif
  1655. } else if (m->flags & MOPT_DATAJ) {
  1656. if (is_remount) {
  1657. if (!sbi->s_journal)
  1658. ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
  1659. else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
  1660. ext4_msg(sb, KERN_ERR,
  1661. "Cannot change data mode on remount");
  1662. return -1;
  1663. }
  1664. } else {
  1665. clear_opt(sb, DATA_FLAGS);
  1666. sbi->s_mount_opt |= m->mount_opt;
  1667. }
  1668. #ifdef CONFIG_QUOTA
  1669. } else if (m->flags & MOPT_QFMT) {
  1670. if (sb_any_quota_loaded(sb) &&
  1671. sbi->s_jquota_fmt != m->mount_opt) {
  1672. ext4_msg(sb, KERN_ERR, "Cannot change journaled "
  1673. "quota options when quota turned on");
  1674. return -1;
  1675. }
  1676. if (ext4_has_feature_quota(sb)) {
  1677. ext4_msg(sb, KERN_INFO,
  1678. "Quota format mount options ignored "
  1679. "when QUOTA feature is enabled");
  1680. return 1;
  1681. }
  1682. sbi->s_jquota_fmt = m->mount_opt;
  1683. #endif
  1684. } else if (token == Opt_dax) {
  1685. #ifdef CONFIG_FS_DAX
  1686. ext4_msg(sb, KERN_WARNING,
  1687. "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
  1688. sbi->s_mount_opt |= m->mount_opt;
  1689. #else
  1690. ext4_msg(sb, KERN_INFO, "dax option not supported");
  1691. return -1;
  1692. #endif
  1693. } else if (token == Opt_data_err_abort) {
  1694. sbi->s_mount_opt |= m->mount_opt;
  1695. } else if (token == Opt_data_err_ignore) {
  1696. sbi->s_mount_opt &= ~m->mount_opt;
  1697. } else {
  1698. if (!args->from)
  1699. arg = 1;
  1700. if (m->flags & MOPT_CLEAR)
  1701. arg = !arg;
  1702. else if (unlikely(!(m->flags & MOPT_SET))) {
  1703. ext4_msg(sb, KERN_WARNING,
  1704. "buggy handling of option %s", opt);
  1705. WARN_ON(1);
  1706. return -1;
  1707. }
  1708. if (arg != 0)
  1709. sbi->s_mount_opt |= m->mount_opt;
  1710. else
  1711. sbi->s_mount_opt &= ~m->mount_opt;
  1712. }
  1713. return 1;
  1714. }
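/*
 * Editorial example of the generic tail of handle_mount_opt(): for a
 * plain boolean option the parsed argument (defaulting to 1 when no
 * "=value" is given) decides the bit, and MOPT_CLEAR inverts it:
 *
 *	"barrier"	-> arg == 1		-> set EXT4_MOUNT_BARRIER
 *	"barrier=0"	-> arg == 0		-> clear EXT4_MOUNT_BARRIER
 *	"nobarrier"	-> arg == 1, inverted	-> clear EXT4_MOUNT_BARRIER
 *
 * so all three spellings funnel through the same few lines above.
 */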
  1715. static int parse_options(char *options, struct super_block *sb,
  1716. unsigned long *journal_devnum,
  1717. unsigned int *journal_ioprio,
  1718. int is_remount)
  1719. {
  1720. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1721. char *p;
  1722. substring_t args[MAX_OPT_ARGS];
  1723. int token;
  1724. if (!options)
  1725. return 1;
  1726. while ((p = strsep(&options, ",")) != NULL) {
  1727. if (!*p)
  1728. continue;
  1729. /*
  1730. * Initialize args struct so we know whether arg was
  1731. * found; some options take optional arguments.
  1732. */
  1733. args[0].to = args[0].from = NULL;
  1734. token = match_token(p, tokens, args);
  1735. if (handle_mount_opt(sb, p, token, args, journal_devnum,
  1736. journal_ioprio, is_remount) < 0)
  1737. return 0;
  1738. }
  1739. #ifdef CONFIG_QUOTA
  1740. /*
  1741. * We do the test below only for project quotas. 'usrquota' and
  1742. * 'grpquota' mount options are allowed even without quota feature
  1743. * to support legacy quotas in quota files.
  1744. */
  1745. if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
  1746. ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
  1747. "Cannot enable project quota enforcement.");
  1748. return 0;
  1749. }
  1750. if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
  1751. if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
  1752. clear_opt(sb, USRQUOTA);
  1753. if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
  1754. clear_opt(sb, GRPQUOTA);
  1755. if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
  1756. ext4_msg(sb, KERN_ERR, "old and new quota "
  1757. "format mixing");
  1758. return 0;
  1759. }
  1760. if (!sbi->s_jquota_fmt) {
  1761. ext4_msg(sb, KERN_ERR, "journaled quota format "
  1762. "not specified");
  1763. return 0;
  1764. }
  1765. }
  1766. #endif
  1767. if (test_opt(sb, DIOREAD_NOLOCK)) {
  1768. int blocksize =
  1769. BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
  1770. if (blocksize < PAGE_SIZE) {
  1771. ext4_msg(sb, KERN_ERR, "can't mount with "
  1772. "dioread_nolock if block size != PAGE_SIZE");
  1773. return 0;
  1774. }
  1775. }
  1776. return 1;
  1777. }
  1778. static inline void ext4_show_quota_options(struct seq_file *seq,
  1779. struct super_block *sb)
  1780. {
  1781. #if defined(CONFIG_QUOTA)
  1782. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1783. if (sbi->s_jquota_fmt) {
  1784. char *fmtname = "";
  1785. switch (sbi->s_jquota_fmt) {
  1786. case QFMT_VFS_OLD:
  1787. fmtname = "vfsold";
  1788. break;
  1789. case QFMT_VFS_V0:
  1790. fmtname = "vfsv0";
  1791. break;
  1792. case QFMT_VFS_V1:
  1793. fmtname = "vfsv1";
  1794. break;
  1795. }
  1796. seq_printf(seq, ",jqfmt=%s", fmtname);
  1797. }
  1798. if (sbi->s_qf_names[USRQUOTA])
  1799. seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
  1800. if (sbi->s_qf_names[GRPQUOTA])
  1801. seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
  1802. #endif
  1803. }
  1804. static const char *token2str(int token)
  1805. {
  1806. const struct match_token *t;
  1807. for (t = tokens; t->token != Opt_err; t++)
  1808. if (t->token == token && !strchr(t->pattern, '='))
  1809. break;
  1810. return t->pattern;
  1811. }
  1812. /*
  1813. * Show an option if
  1814. * - it's set to a non-default value OR
1815. * - the per-sb default is different from the global default
  1816. */
  1817. static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
  1818. int nodefs)
  1819. {
  1820. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1821. struct ext4_super_block *es = sbi->s_es;
  1822. int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
  1823. const struct mount_opts *m;
  1824. char sep = nodefs ? '\n' : ',';
  1825. #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
  1826. #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
  1827. if (sbi->s_sb_block != 1)
  1828. SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
  1829. for (m = ext4_mount_opts; m->token != Opt_err; m++) {
  1830. int want_set = m->flags & MOPT_SET;
  1831. if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
  1832. (m->flags & MOPT_CLEAR_ERR))
  1833. continue;
  1834. if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
  1835. continue; /* skip if same as the default */
  1836. if ((want_set &&
  1837. (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
  1838. (!want_set && (sbi->s_mount_opt & m->mount_opt)))
  1839. continue; /* select Opt_noFoo vs Opt_Foo */
  1840. SEQ_OPTS_PRINT("%s", token2str(m->token));
  1841. }
  1842. if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
  1843. le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
  1844. SEQ_OPTS_PRINT("resuid=%u",
  1845. from_kuid_munged(&init_user_ns, sbi->s_resuid));
  1846. if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
  1847. le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
  1848. SEQ_OPTS_PRINT("resgid=%u",
  1849. from_kgid_munged(&init_user_ns, sbi->s_resgid));
  1850. def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
  1851. if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
  1852. SEQ_OPTS_PUTS("errors=remount-ro");
  1853. if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
  1854. SEQ_OPTS_PUTS("errors=continue");
  1855. if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
  1856. SEQ_OPTS_PUTS("errors=panic");
  1857. if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
  1858. SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
  1859. if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
  1860. SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
  1861. if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
  1862. SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
  1863. if (sb->s_flags & SB_I_VERSION)
  1864. SEQ_OPTS_PUTS("i_version");
  1865. if (nodefs || sbi->s_stripe)
  1866. SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
  1867. if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
  1868. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
  1869. SEQ_OPTS_PUTS("data=journal");
  1870. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
  1871. SEQ_OPTS_PUTS("data=ordered");
  1872. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
  1873. SEQ_OPTS_PUTS("data=writeback");
  1874. }
  1875. if (nodefs ||
  1876. sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
  1877. SEQ_OPTS_PRINT("inode_readahead_blks=%u",
  1878. sbi->s_inode_readahead_blks);
  1879. if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
  1880. (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
  1881. SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
  1882. if (nodefs || sbi->s_max_dir_size_kb)
  1883. SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
  1884. if (test_opt(sb, DATA_ERR_ABORT))
  1885. SEQ_OPTS_PUTS("data_err=abort");
  1886. ext4_show_quota_options(seq, sb);
  1887. return 0;
  1888. }
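/*
 * Editorial note: the filter in _ext4_show_options() relies on
 * (s_mount_opt ^ def_mount_opt) being exactly the set of bits that
 * differ from the mount-time defaults. For example, if
 * EXT4_MOUNT_DISCARD is clear by default and the user mounted with
 * "discard", the XOR has that bit set and "discard" is printed. In
 * the nodefs case (the per-filesystem options file served by
 * ext4_seq_options_show() below) def_mount_opt is 0, so every active
 * option is shown regardless of the defaults.
 */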
  1889. static int ext4_show_options(struct seq_file *seq, struct dentry *root)
  1890. {
  1891. return _ext4_show_options(seq, root->d_sb, 0);
  1892. }
  1893. int ext4_seq_options_show(struct seq_file *seq, void *offset)
  1894. {
  1895. struct super_block *sb = seq->private;
  1896. int rc;
  1897. seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
  1898. rc = _ext4_show_options(seq, sb, 1);
  1899. seq_puts(seq, "\n");
  1900. return rc;
  1901. }
  1902. static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
  1903. int read_only)
  1904. {
  1905. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1906. int res = 0;
  1907. if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
  1908. ext4_msg(sb, KERN_ERR, "revision level too high, "
  1909. "forcing read-only mode");
  1910. res = SB_RDONLY;
  1911. }
  1912. if (read_only)
  1913. goto done;
  1914. if (!(sbi->s_mount_state & EXT4_VALID_FS))
  1915. ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
  1916. "running e2fsck is recommended");
  1917. else if (sbi->s_mount_state & EXT4_ERROR_FS)
  1918. ext4_msg(sb, KERN_WARNING,
  1919. "warning: mounting fs with errors, "
  1920. "running e2fsck is recommended");
  1921. else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
  1922. le16_to_cpu(es->s_mnt_count) >=
  1923. (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
  1924. ext4_msg(sb, KERN_WARNING,
  1925. "warning: maximal mount count reached, "
  1926. "running e2fsck is recommended");
  1927. else if (le32_to_cpu(es->s_checkinterval) &&
  1928. (le32_to_cpu(es->s_lastcheck) +
  1929. le32_to_cpu(es->s_checkinterval) <= get_seconds()))
  1930. ext4_msg(sb, KERN_WARNING,
  1931. "warning: checktime reached, "
  1932. "running e2fsck is recommended");
  1933. if (!sbi->s_journal)
  1934. es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
  1935. if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
  1936. es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
  1937. le16_add_cpu(&es->s_mnt_count, 1);
  1938. es->s_mtime = cpu_to_le32(get_seconds());
  1939. ext4_update_dynamic_rev(sb);
  1940. if (sbi->s_journal)
  1941. ext4_set_feature_journal_needs_recovery(sb);
  1942. ext4_commit_super(sb, 1);
  1943. done:
  1944. if (test_opt(sb, DEBUG))
  1945. printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
  1946. "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
  1947. sb->s_blocksize,
  1948. sbi->s_groups_count,
  1949. EXT4_BLOCKS_PER_GROUP(sb),
  1950. EXT4_INODES_PER_GROUP(sb),
  1951. sbi->s_mount_opt, sbi->s_mount_opt2);
  1952. cleancache_init_fs(sb);
  1953. return res;
  1954. }
  1955. int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
  1956. {
  1957. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1958. struct flex_groups *new_groups;
  1959. int size;
  1960. if (!sbi->s_log_groups_per_flex)
  1961. return 0;
  1962. size = ext4_flex_group(sbi, ngroup - 1) + 1;
  1963. if (size <= sbi->s_flex_groups_allocated)
  1964. return 0;
  1965. size = roundup_pow_of_two(size * sizeof(struct flex_groups));
  1966. new_groups = kvzalloc(size, GFP_KERNEL);
  1967. if (!new_groups) {
  1968. ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
  1969. size / (int) sizeof(struct flex_groups));
  1970. return -ENOMEM;
  1971. }
  1972. if (sbi->s_flex_groups) {
  1973. memcpy(new_groups, sbi->s_flex_groups,
  1974. (sbi->s_flex_groups_allocated *
  1975. sizeof(struct flex_groups)));
  1976. kvfree(sbi->s_flex_groups);
  1977. }
  1978. sbi->s_flex_groups = new_groups;
  1979. sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
  1980. return 0;
  1981. }
  1982. static int ext4_fill_flex_info(struct super_block *sb)
  1983. {
  1984. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1985. struct ext4_group_desc *gdp = NULL;
  1986. ext4_group_t flex_group;
  1987. int i, err;
  1988. sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
  1989. if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
  1990. sbi->s_log_groups_per_flex = 0;
  1991. return 1;
  1992. }
  1993. err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
  1994. if (err)
  1995. goto failed;
  1996. for (i = 0; i < sbi->s_groups_count; i++) {
  1997. gdp = ext4_get_group_desc(sb, i, NULL);
  1998. flex_group = ext4_flex_group(sbi, i);
  1999. atomic_add(ext4_free_inodes_count(sb, gdp),
  2000. &sbi->s_flex_groups[flex_group].free_inodes);
  2001. atomic64_add(ext4_free_group_clusters(sb, gdp),
  2002. &sbi->s_flex_groups[flex_group].free_clusters);
  2003. atomic_add(ext4_used_dirs_count(sb, gdp),
  2004. &sbi->s_flex_groups[flex_group].used_dirs);
  2005. }
  2006. return 1;
  2007. failed:
  2008. return 0;
  2009. }
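/*
 * Editorial example: ext4_flex_group() is simply
 * (group >> s_log_groups_per_flex), so with s_log_groups_per_flex == 4
 * the loop above folds the free-inode, free-cluster and used-directory
 * counts of block groups 0-15 into flex group 0, groups 16-31 into
 * flex group 1, and so on.
 */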
  2010. static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
  2011. struct ext4_group_desc *gdp)
  2012. {
  2013. int offset = offsetof(struct ext4_group_desc, bg_checksum);
  2014. __u16 crc = 0;
  2015. __le32 le_group = cpu_to_le32(block_group);
  2016. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2017. if (ext4_has_metadata_csum(sbi->s_sb)) {
  2018. /* Use new metadata_csum algorithm */
  2019. __u32 csum32;
  2020. __u16 dummy_csum = 0;
  2021. csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
  2022. sizeof(le_group));
  2023. csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
  2024. csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
  2025. sizeof(dummy_csum));
  2026. offset += sizeof(dummy_csum);
  2027. if (offset < sbi->s_desc_size)
  2028. csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
  2029. sbi->s_desc_size - offset);
  2030. crc = csum32 & 0xFFFF;
  2031. goto out;
  2032. }
  2033. /* old crc16 code */
  2034. if (!ext4_has_feature_gdt_csum(sb))
  2035. return 0;
  2036. crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
  2037. crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
  2038. crc = crc16(crc, (__u8 *)gdp, offset);
  2039. offset += sizeof(gdp->bg_checksum); /* skip checksum */
2040. /* for the checksum of struct ext4_group_desc do the rest... */
  2041. if (ext4_has_feature_64bit(sb) &&
  2042. offset < le16_to_cpu(sbi->s_es->s_desc_size))
  2043. crc = crc16(crc, (__u8 *)gdp + offset,
  2044. le16_to_cpu(sbi->s_es->s_desc_size) -
  2045. offset);
  2046. out:
  2047. return cpu_to_le16(crc);
  2048. }
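/*
 * Editorial sketch of the "dummy checksum" idiom used above: to
 * checksum a structure that embeds its own checksum field, feed the
 * bytes before the field, then a zeroed stand-in of the same width,
 * then the bytes after it. For a hypothetical struct desc *d with a
 * __le16 csum member (an assumption for illustration; the real code
 * uses struct ext4_group_desc):
 *
 *	__u16 zero = 0;
 *	int off = offsetof(struct desc, csum);
 *
 *	crc = ext4_chksum(sbi, crc, (__u8 *)d, off);
 *	crc = ext4_chksum(sbi, crc, (__u8 *)&zero, sizeof(zero));
 *	crc = ext4_chksum(sbi, crc, (__u8 *)d + off + sizeof(zero),
 *			  sizeof(*d) - off - sizeof(zero));
 *
 * This lets the verify side recompute the expected value without
 * copying the descriptor or clearing the field in place.
 */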
  2049. int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
  2050. struct ext4_group_desc *gdp)
  2051. {
  2052. if (ext4_has_group_desc_csum(sb) &&
  2053. (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
  2054. return 0;
  2055. return 1;
  2056. }
  2057. void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
  2058. struct ext4_group_desc *gdp)
  2059. {
  2060. if (!ext4_has_group_desc_csum(sb))
  2061. return;
  2062. gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
  2063. }
  2064. /* Called at mount-time, super-block is locked */
  2065. static int ext4_check_descriptors(struct super_block *sb,
  2066. ext4_fsblk_t sb_block,
  2067. ext4_group_t *first_not_zeroed)
  2068. {
  2069. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2070. ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
  2071. ext4_fsblk_t last_block;
  2072. ext4_fsblk_t block_bitmap;
  2073. ext4_fsblk_t inode_bitmap;
  2074. ext4_fsblk_t inode_table;
  2075. int flexbg_flag = 0;
  2076. ext4_group_t i, grp = sbi->s_groups_count;
  2077. if (ext4_has_feature_flex_bg(sb))
  2078. flexbg_flag = 1;
  2079. ext4_debug("Checking group descriptors");
  2080. for (i = 0; i < sbi->s_groups_count; i++) {
  2081. struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
  2082. if (i == sbi->s_groups_count - 1 || flexbg_flag)
  2083. last_block = ext4_blocks_count(sbi->s_es) - 1;
  2084. else
  2085. last_block = first_block +
  2086. (EXT4_BLOCKS_PER_GROUP(sb) - 1);
  2087. if ((grp == sbi->s_groups_count) &&
  2088. !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
  2089. grp = i;
  2090. block_bitmap = ext4_block_bitmap(sb, gdp);
  2091. if (block_bitmap == sb_block) {
  2092. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2093. "Block bitmap for group %u overlaps "
  2094. "superblock", i);
  2095. }
  2096. if (block_bitmap < first_block || block_bitmap > last_block) {
  2097. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2098. "Block bitmap for group %u not in group "
  2099. "(block %llu)!", i, block_bitmap);
  2100. return 0;
  2101. }
  2102. inode_bitmap = ext4_inode_bitmap(sb, gdp);
  2103. if (inode_bitmap == sb_block) {
  2104. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2105. "Inode bitmap for group %u overlaps "
  2106. "superblock", i);
  2107. }
  2108. if (inode_bitmap < first_block || inode_bitmap > last_block) {
  2109. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2110. "Inode bitmap for group %u not in group "
  2111. "(block %llu)!", i, inode_bitmap);
  2112. return 0;
  2113. }
  2114. inode_table = ext4_inode_table(sb, gdp);
  2115. if (inode_table == sb_block) {
  2116. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2117. "Inode table for group %u overlaps "
  2118. "superblock", i);
  2119. }
  2120. if (inode_table < first_block ||
  2121. inode_table + sbi->s_itb_per_group - 1 > last_block) {
  2122. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2123. "Inode table for group %u not in group "
  2124. "(block %llu)!", i, inode_table);
  2125. return 0;
  2126. }
  2127. ext4_lock_group(sb, i);
  2128. if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
  2129. ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
  2130. "Checksum for group %u failed (%u!=%u)",
  2131. i, le16_to_cpu(ext4_group_desc_csum(sb, i,
  2132. gdp)), le16_to_cpu(gdp->bg_checksum));
  2133. if (!sb_rdonly(sb)) {
  2134. ext4_unlock_group(sb, i);
  2135. return 0;
  2136. }
  2137. }
  2138. ext4_unlock_group(sb, i);
  2139. if (!flexbg_flag)
  2140. first_block += EXT4_BLOCKS_PER_GROUP(sb);
  2141. }
2142. if (first_not_zeroed != NULL)
  2143. *first_not_zeroed = grp;
  2144. return 1;
  2145. }
  2146. /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
  2147. * the superblock) which were deleted from all directories, but held open by
  2148. * a process at the time of a crash. We walk the list and try to delete these
  2149. * inodes at recovery time (only with a read-write filesystem).
  2150. *
  2151. * In order to keep the orphan inode chain consistent during traversal (in
  2152. * case of crash during recovery), we link each inode into the superblock
  2153. * orphan list_head and handle it the same way as an inode deletion during
  2154. * normal operation (which journals the operations for us).
  2155. *
  2156. * We only do an iget() and an iput() on each inode, which is very safe if we
  2157. * accidentally point at an in-use or already deleted inode. The worst that
  2158. * can happen in this case is that we get a "bit already cleared" message from
  2159. * ext4_free_inode(). The only reason we would point at a wrong inode is if
  2160. * e2fsck was run on this filesystem, and it must have already done the orphan
  2161. * inode cleanup for us, so we can safely abort without any further action.
  2162. */
  2163. static void ext4_orphan_cleanup(struct super_block *sb,
  2164. struct ext4_super_block *es)
  2165. {
  2166. unsigned int s_flags = sb->s_flags;
  2167. int ret, nr_orphans = 0, nr_truncates = 0;
  2168. #ifdef CONFIG_QUOTA
  2169. int quota_update = 0;
  2170. int i;
  2171. #endif
  2172. if (!es->s_last_orphan) {
  2173. jbd_debug(4, "no orphan inodes to clean up\n");
  2174. return;
  2175. }
  2176. if (bdev_read_only(sb->s_bdev)) {
  2177. ext4_msg(sb, KERN_ERR, "write access "
  2178. "unavailable, skipping orphan cleanup");
  2179. return;
  2180. }
2181. /* Check if the feature set would not allow an r/w mount */
  2182. if (!ext4_feature_set_ok(sb, 0)) {
  2183. ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
  2184. "unknown ROCOMPAT features");
  2185. return;
  2186. }
  2187. if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
  2188. /* don't clear list on RO mount w/ errors */
  2189. if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
  2190. ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
  2191. "clearing orphan list.\n");
  2192. es->s_last_orphan = 0;
  2193. }
  2194. jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
  2195. return;
  2196. }
  2197. if (s_flags & SB_RDONLY) {
  2198. ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
  2199. sb->s_flags &= ~SB_RDONLY;
  2200. }
  2201. #ifdef CONFIG_QUOTA
  2202. /* Needed for iput() to work correctly and not trash data */
  2203. sb->s_flags |= SB_ACTIVE;
  2204. /*
2205. * Turn on quotas which were not enabled for read-only mounts if
2206. * the filesystem has the quota feature, so they are updated correctly.
  2207. */
  2208. if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
  2209. int ret = ext4_enable_quotas(sb);
  2210. if (!ret)
  2211. quota_update = 1;
  2212. else
  2213. ext4_msg(sb, KERN_ERR,
  2214. "Cannot turn on quotas: error %d", ret);
  2215. }
2216. /* Turn on journaled quotas used for old-style quota files */
  2217. for (i = 0; i < EXT4_MAXQUOTAS; i++) {
  2218. if (EXT4_SB(sb)->s_qf_names[i]) {
  2219. int ret = ext4_quota_on_mount(sb, i);
  2220. if (!ret)
  2221. quota_update = 1;
  2222. else
  2223. ext4_msg(sb, KERN_ERR,
  2224. "Cannot turn on journaled "
  2225. "quota: type %d: error %d", i, ret);
  2226. }
  2227. }
  2228. #endif
  2229. while (es->s_last_orphan) {
  2230. struct inode *inode;
  2231. /*
  2232. * We may have encountered an error during cleanup; if
  2233. * so, skip the rest.
  2234. */
  2235. if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
  2236. jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
  2237. es->s_last_orphan = 0;
  2238. break;
  2239. }
  2240. inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
  2241. if (IS_ERR(inode)) {
  2242. es->s_last_orphan = 0;
  2243. break;
  2244. }
  2245. list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
  2246. dquot_initialize(inode);
  2247. if (inode->i_nlink) {
  2248. if (test_opt(sb, DEBUG))
  2249. ext4_msg(sb, KERN_DEBUG,
  2250. "%s: truncating inode %lu to %lld bytes",
  2251. __func__, inode->i_ino, inode->i_size);
  2252. jbd_debug(2, "truncating inode %lu to %lld bytes\n",
  2253. inode->i_ino, inode->i_size);
  2254. inode_lock(inode);
  2255. truncate_inode_pages(inode->i_mapping, inode->i_size);
  2256. ret = ext4_truncate(inode);
  2257. if (ret)
  2258. ext4_std_error(inode->i_sb, ret);
  2259. inode_unlock(inode);
  2260. nr_truncates++;
  2261. } else {
  2262. if (test_opt(sb, DEBUG))
  2263. ext4_msg(sb, KERN_DEBUG,
  2264. "%s: deleting unreferenced inode %lu",
  2265. __func__, inode->i_ino);
  2266. jbd_debug(2, "deleting unreferenced inode %lu\n",
  2267. inode->i_ino);
  2268. nr_orphans++;
  2269. }
  2270. iput(inode); /* The delete magic happens here! */
  2271. }
  2272. #define PLURAL(x) (x), ((x) == 1) ? "" : "s"
  2273. if (nr_orphans)
  2274. ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
  2275. PLURAL(nr_orphans));
  2276. if (nr_truncates)
  2277. ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
  2278. PLURAL(nr_truncates));
  2279. #ifdef CONFIG_QUOTA
  2280. /* Turn off quotas if they were enabled for orphan cleanup */
  2281. if (quota_update) {
  2282. for (i = 0; i < EXT4_MAXQUOTAS; i++) {
  2283. if (sb_dqopt(sb)->files[i])
  2284. dquot_quota_off(sb, i);
  2285. }
  2286. }
  2287. #endif
  2288. sb->s_flags = s_flags; /* Restore SB_RDONLY status */
  2289. }
2290. /*
2291. * Maximal extent format file size.
2292. * The resulting logical block number at s_maxbytes must fit in our
2293. * on-disk extent format containers, within a sector_t, and within
2294. * i_blocks in the vfs. The ext4 inode has 48 bits of i_blocks in
2295. * fsblock units, so that won't be a limiting factor.
2296. *
2297. * However, there is another limiting factor: we store extents as a
2298. * starting block plus a length, so the length of an extent covering
2299. * the maximum file size must also fit the on-disk containers. Since
2300. * covering blocks 0..N takes a length of N + 1 (one more than the
2301. * largest block number), we have to lower s_maxbytes by one fs block.
2302. *
2303. * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2304. */
  2305. static loff_t ext4_max_size(int blkbits, int has_huge_files)
  2306. {
  2307. loff_t res;
  2308. loff_t upper_limit = MAX_LFS_FILESIZE;
  2309. /* small i_blocks in vfs inode? */
  2310. if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
2311. /*
2312. * With !has_huge_files (or CONFIG_LBDAF disabled) the inode's
2313. * i_blocks counts 512-byte sectors in a 32-bit field
2314. * (32 == sizeof the vfs inode's i_blocks * 8).
2315. */
  2316. upper_limit = (1LL << 32) - 1;
  2317. /* total blocks in file system block size */
  2318. upper_limit >>= (blkbits - 9);
  2319. upper_limit <<= blkbits;
  2320. }
  2321. /*
  2322. * 32-bit extent-start container, ee_block. We lower the maxbytes
  2323. * by one fs block, so ee_len can cover the extent of maximum file
  2324. * size
  2325. */
  2326. res = (1LL << 32) - 1;
  2327. res <<= blkbits;
  2328. /* Sanity check against vm- & vfs- imposed limits */
  2329. if (res > upper_limit)
  2330. res = upper_limit;
  2331. return res;
  2332. }
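/*
 * Worked example (editorial): for 4KiB blocks (blkbits == 12) with
 * huge_file support, res = ((1LL << 32) - 1) << 12, i.e. 16TiB minus
 * one 4KiB block. Without huge files, the 32-bit count of 512-byte
 * sectors in i_blocks caps this first:
 * (((1LL << 32) - 1) >> 3) << 12, roughly 2TiB.
 */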
  2333. /*
  2334. * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
  2335. * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
  2336. * We need to be 1 filesystem block less than the 2^48 sector limit.
  2337. */
  2338. static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
  2339. {
  2340. loff_t res = EXT4_NDIR_BLOCKS;
  2341. int meta_blocks;
  2342. loff_t upper_limit;
  2343. /* This is calculated to be the largest file size for a dense, block
  2344. * mapped file such that the file's total number of 512-byte sectors,
  2345. * including data and all indirect blocks, does not exceed (2^48 - 1).
  2346. *
2347. * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
  2348. * number of 512-byte sectors of the file.
  2349. */
  2350. if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
  2351. /*
  2352. * !has_huge_files or CONFIG_LBDAF not enabled implies that
  2353. * the inode i_block field represents total file blocks in
  2354. * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
  2355. */
  2356. upper_limit = (1LL << 32) - 1;
  2357. /* total blocks in file system block size */
  2358. upper_limit >>= (bits - 9);
  2359. } else {
  2360. /*
  2361. * We use 48 bit ext4_inode i_blocks
  2362. * With EXT4_HUGE_FILE_FL set the i_blocks
  2363. * represent total number of blocks in
  2364. * file system block size
  2365. */
  2366. upper_limit = (1LL << 48) - 1;
  2367. }
  2368. /* indirect blocks */
  2369. meta_blocks = 1;
  2370. /* double indirect blocks */
  2371. meta_blocks += 1 + (1LL << (bits-2));
  2372. /* tripple indirect blocks */
  2373. meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
  2374. upper_limit -= meta_blocks;
  2375. upper_limit <<= bits;
  2376. res += 1LL << (bits-2);
  2377. res += 1LL << (2*(bits-2));
  2378. res += 1LL << (3*(bits-2));
  2379. res <<= bits;
  2380. if (res > upper_limit)
  2381. res = upper_limit;
  2382. if (res > MAX_LFS_FILESIZE)
  2383. res = MAX_LFS_FILESIZE;
  2384. return res;
  2385. }
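
/*
 * Worked example (illustrative): with 4 KiB blocks (bits == 12) each
 * indirect block holds 2^(12 - 2) = 1024 block numbers, so
 * res = (12 + 2^10 + 2^20 + 2^30) blocks * 4096 bytes -- a little over
 * 4 TiB -- before the i_blocks sector limit above is applied.
 */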

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
                                   ext4_fsblk_t logical_sb_block, int nr)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_group_t bg, first_meta_bg;
        int has_super = 0;

        first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

        if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
                return logical_sb_block + nr + 1;
        bg = sbi->s_desc_per_block * nr;
        if (ext4_bg_has_super(sb, bg))
                has_super = 1;

        /*
         * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
         * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
         * on modern mke2fs or blksize > 1k on older mke2fs) then we must
         * compensate.
         */
        if (sb->s_blocksize == 1024 && nr == 0 &&
            le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
                has_super++;
        return (has_super + ext4_group_first_block_no(sb, bg));
}
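
/*
 * Illustrative summary of the logic above: without META_BG (or for
 * descriptor blocks below s_first_meta_bg) group descriptor block nr
 * simply follows the primary superblock at logical_sb_block + nr + 1.
 * With META_BG, descriptor block nr instead lives at the start of the
 * group numbered s_desc_per_block * nr, right after that group's backup
 * superblock if it has one.
 */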

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If the stripe size was specified via a mount option and it does not
 * exceed the blocks per group, use the mount option value.  Otherwise
 * fall back to the superblock's stripe width, then to its stride; if
 * those also exceed the blocks per group, return 0.
 * The allocator needs the stripe size to be less than blocks per group.
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
        unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
        unsigned long stripe_width =
                le32_to_cpu(sbi->s_es->s_raid_stripe_width);
        int ret;

        if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
                ret = sbi->s_stripe;
        else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
                ret = stripe_width;
        else if (stride && stride <= sbi->s_blocks_per_group)
                ret = stride;
        else
                ret = 0;

        /*
         * If the stripe width is 1, this makes no sense and
         * we set it to 0 to turn off stripe handling code.
         */
        if (ret <= 1)
                ret = 0;

        return ret;
}
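
/*
 * Worked example (illustrative): on a filesystem with 32768 blocks per
 * group where mke2fs recorded a RAID stride of 16 and a stripe width of
 * 64, mounting without a stripe= option returns 64 (the stripe width);
 * if the recorded width were bogus, say larger than 32768, the code
 * would fall back to the stride of 16.
 */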

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
        if (ext4_has_unknown_ext4_incompat_features(sb)) {
                ext4_msg(sb, KERN_ERR,
                         "Couldn't mount because of "
                         "unsupported optional features (%x)",
                         (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
                          ~EXT4_FEATURE_INCOMPAT_SUPP));
                return 0;
        }

        if (readonly)
                return 1;

        if (ext4_has_feature_readonly(sb)) {
                ext4_msg(sb, KERN_INFO, "filesystem is read-only");
                sb->s_flags |= SB_RDONLY;
                return 1;
        }

        /* Check that feature set is OK for a read-write mount */
        if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
                ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
                         "unsupported optional features (%x)",
                         (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
                          ~EXT4_FEATURE_RO_COMPAT_SUPP));
                return 0;
        }
        /*
         * A filesystem with the huge_file feature can only be mounted
         * read-write on 32-bit systems if the kernel is built with
         * CONFIG_LBDAF.
         */
        if (ext4_has_feature_huge_file(sb)) {
                if (sizeof(blkcnt_t) < sizeof(u64)) {
                        ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
                                 "cannot be mounted RDWR without "
                                 "CONFIG_LBDAF");
                        return 0;
                }
        }
        if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
                ext4_msg(sb, KERN_ERR,
                         "Can't support bigalloc feature without "
                         "extents feature");
                return 0;
        }

#ifndef CONFIG_QUOTA
        if (ext4_has_feature_quota(sb) && !readonly) {
                ext4_msg(sb, KERN_ERR,
                         "Filesystem with quota feature cannot be mounted RDWR "
                         "without CONFIG_QUOTA");
                return 0;
        }
        if (ext4_has_feature_project(sb) && !readonly) {
                ext4_msg(sb, KERN_ERR,
                         "Filesystem with project quota feature cannot be mounted RDWR "
                         "without CONFIG_QUOTA");
                return 0;
        }
#endif  /* CONFIG_QUOTA */
        return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
        struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
        struct super_block *sb = sbi->s_sb;
        struct ext4_super_block *es = sbi->s_es;

        if (es->s_error_count)
                /* fsck newer than v1.41.13 is needed to clean this condition. */
                ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
                         le32_to_cpu(es->s_error_count));
        if (es->s_first_error_time) {
                printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
                       sb->s_id, le32_to_cpu(es->s_first_error_time),
                       (int) sizeof(es->s_first_error_func),
                       es->s_first_error_func,
                       le32_to_cpu(es->s_first_error_line));
                if (es->s_first_error_ino)
                        printk(KERN_CONT ": inode %u",
                               le32_to_cpu(es->s_first_error_ino));
                if (es->s_first_error_block)
                        printk(KERN_CONT ": block %llu", (unsigned long long)
                               le64_to_cpu(es->s_first_error_block));
                printk(KERN_CONT "\n");
        }
        if (es->s_last_error_time) {
                printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
                       sb->s_id, le32_to_cpu(es->s_last_error_time),
                       (int) sizeof(es->s_last_error_func),
                       es->s_last_error_func,
                       le32_to_cpu(es->s_last_error_line));
                if (es->s_last_error_ino)
                        printk(KERN_CONT ": inode %u",
                               le32_to_cpu(es->s_last_error_ino));
                if (es->s_last_error_block)
                        printk(KERN_CONT ": block %llu", (unsigned long long)
                               le64_to_cpu(es->s_last_error_block));
                printk(KERN_CONT "\n");
        }
        mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}
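
/*
 * Illustrative output, pieced together from the format strings above
 * (all values are made up):
 *
 *   EXT4-fs (sda1): initial error at time 1540000000: ext4_lookup:1602:
 *   inode 131076: block 524295
 */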

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
        struct ext4_group_desc *gdp = NULL;
        ext4_group_t group, ngroups;
        struct super_block *sb;
        unsigned long timeout = 0;
        int ret = 0;

        sb = elr->lr_super;
        ngroups = EXT4_SB(sb)->s_groups_count;

        for (group = elr->lr_next_group; group < ngroups; group++) {
                gdp = ext4_get_group_desc(sb, group, NULL);
                if (!gdp) {
                        ret = 1;
                        break;
                }

                if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
                        break;
        }

        if (group >= ngroups)
                ret = 1;

        if (!ret) {
                timeout = jiffies;
                ret = ext4_init_inode_table(sb, group,
                                            elr->lr_timeout ? 0 : 1);
                if (elr->lr_timeout == 0) {
                        timeout = (jiffies - timeout) *
                                  elr->lr_sbi->s_li_wait_mult;
                        elr->lr_timeout = timeout;
                }
                elr->lr_next_sched = jiffies + elr->lr_timeout;
                elr->lr_next_group = group + 1;
        }
        return ret;
}
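
/*
 * Illustrative timing note: the first time a request runs, lr_timeout is
 * derived from the measured zeroing time.  If zeroing one group's inode
 * table takes 20ms and s_li_wait_mult is 10 (EXT4_DEF_LI_WAIT_MULT --
 * assumed here to be the usual default; the value is tunable at mount
 * time), the next group is scheduled roughly 200ms later, so the thread
 * stays idle ~90% of the time.
 */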

/*
 * Remove lr_request from the list_request and free the
 * request structure.  Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
        struct ext4_sb_info *sbi;

        if (!elr)
                return;

        sbi = elr->lr_sbi;

        list_del(&elr->lr_request);
        sbi->s_li_request = NULL;
        kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
        mutex_lock(&ext4_li_mtx);
        if (!ext4_li_info) {
                mutex_unlock(&ext4_li_mtx);
                return;
        }

        mutex_lock(&ext4_li_info->li_list_mtx);
        ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
        mutex_unlock(&ext4_li_info->li_list_mtx);
        mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives.  It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function.  Based on that time we compute the next schedule time of
 * the request.  When walking through the list is complete, compute the
 * next wakeup time and put itself to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
        struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
        struct list_head *pos, *n;
        struct ext4_li_request *elr;
        unsigned long next_wakeup, cur;

        BUG_ON(NULL == eli);

cont_thread:
        while (true) {
                next_wakeup = MAX_JIFFY_OFFSET;

                mutex_lock(&eli->li_list_mtx);
                if (list_empty(&eli->li_request_list)) {
                        mutex_unlock(&eli->li_list_mtx);
                        goto exit_thread;
                }
                list_for_each_safe(pos, n, &eli->li_request_list) {
                        int err = 0;
                        int progress = 0;

                        elr = list_entry(pos, struct ext4_li_request,
                                         lr_request);

                        if (time_before(jiffies, elr->lr_next_sched)) {
                                if (time_before(elr->lr_next_sched, next_wakeup))
                                        next_wakeup = elr->lr_next_sched;
                                continue;
                        }
                        if (down_read_trylock(&elr->lr_super->s_umount)) {
                                if (sb_start_write_trylock(elr->lr_super)) {
                                        progress = 1;
                                        /*
                                         * We hold sb->s_umount, sb can not
                                         * be removed from the list, it is
                                         * now safe to drop li_list_mtx
                                         */
                                        mutex_unlock(&eli->li_list_mtx);
                                        err = ext4_run_li_request(elr);
                                        sb_end_write(elr->lr_super);
                                        mutex_lock(&eli->li_list_mtx);
                                        n = pos->next;
                                }
                                up_read((&elr->lr_super->s_umount));
                        }
                        /* error, remove the lazy_init job */
                        if (err) {
                                ext4_remove_li_request(elr);
                                continue;
                        }
                        if (!progress) {
                                elr->lr_next_sched = jiffies +
                                        (prandom_u32()
                                         % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
                        }
                        if (time_before(elr->lr_next_sched, next_wakeup))
                                next_wakeup = elr->lr_next_sched;
                }
                mutex_unlock(&eli->li_list_mtx);

                try_to_freeze();

                cur = jiffies;
                if ((time_after_eq(cur, next_wakeup)) ||
                    (MAX_JIFFY_OFFSET == next_wakeup)) {
                        cond_resched();
                        continue;
                }

                schedule_timeout_interruptible(next_wakeup - cur);

                if (kthread_should_stop()) {
                        ext4_clear_request_list();
                        goto exit_thread;
                }
        }

exit_thread:
        /*
         * It looks like the request list is empty, but we need
         * to check it under the li_list_mtx lock, to prevent any
         * additions into it, and of course we should lock ext4_li_mtx
         * to atomically free the list and ext4_li_info, because at
         * this point another ext4 filesystem could be registering
         * a new one.
         */
        mutex_lock(&ext4_li_mtx);
        mutex_lock(&eli->li_list_mtx);
        if (!list_empty(&eli->li_request_list)) {
                mutex_unlock(&eli->li_list_mtx);
                mutex_unlock(&ext4_li_mtx);
                goto cont_thread;
        }
        mutex_unlock(&eli->li_list_mtx);
        kfree(ext4_li_info);
        ext4_li_info = NULL;
        mutex_unlock(&ext4_li_mtx);

        return 0;
}
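
/*
 * Locking note, summarizing the code above: ext4_li_mtx protects the
 * lifetime of ext4_li_info itself, while li_list_mtx protects the request
 * list hanging off it.  Whenever both are needed, ext4_li_mtx is taken
 * first, both here and in ext4_register_li_request().
 */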

static void ext4_clear_request_list(void)
{
        struct list_head *pos, *n;
        struct ext4_li_request *elr;

        mutex_lock(&ext4_li_info->li_list_mtx);
        list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
                elr = list_entry(pos, struct ext4_li_request,
                                 lr_request);
                ext4_remove_li_request(elr);
        }
        mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
        ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
                                         ext4_li_info, "ext4lazyinit");
        if (IS_ERR(ext4_lazyinit_task)) {
                int err = PTR_ERR(ext4_lazyinit_task);
                ext4_clear_request_list();
                kfree(ext4_li_info);
                ext4_li_info = NULL;
                printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
                                 "initialization thread\n",
                                 err);
                return err;
        }
        ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
        return 0;
}

/*
 * Check whether it makes sense to run the itable init thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number; otherwise the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
        ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
        struct ext4_group_desc *gdp = NULL;

        for (group = 0; group < ngroups; group++) {
                gdp = ext4_get_group_desc(sb, group, NULL);
                if (!gdp)
                        continue;

                if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
                        break;
        }

        return group;
}

static int ext4_li_info_new(void)
{
        struct ext4_lazy_init *eli = NULL;

        eli = kzalloc(sizeof(*eli), GFP_KERNEL);
        if (!eli)
                return -ENOMEM;

        INIT_LIST_HEAD(&eli->li_request_list);
        mutex_init(&eli->li_list_mtx);

        eli->li_state |= EXT4_LAZYINIT_QUIT;

        ext4_li_info = eli;

        return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
                                                   ext4_group_t start)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_li_request *elr;

        elr = kzalloc(sizeof(*elr), GFP_KERNEL);
        if (!elr)
                return NULL;

        elr->lr_super = sb;
        elr->lr_sbi = sbi;
        elr->lr_next_group = start;

        /*
         * Randomize first schedule time of the request to
         * spread the inode table initialization requests
         * better.
         */
        elr->lr_next_sched = jiffies + (prandom_u32() %
                                (EXT4_DEF_LI_MAX_START_DELAY * HZ));
        return elr;
}
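
/*
 * Illustrative note: assuming the usual EXT4_DEF_LI_MAX_START_DELAY of
 * 5 seconds (check ext4.h for the kernel at hand), each request first
 * fires at a random point within five seconds of registration, so
 * filesystems mounted together at boot do not all start zeroing at once.
 */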

int ext4_register_li_request(struct super_block *sb,
                             ext4_group_t first_not_zeroed)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_li_request *elr = NULL;
        ext4_group_t ngroups = sbi->s_groups_count;
        int ret = 0;

        mutex_lock(&ext4_li_mtx);
        if (sbi->s_li_request != NULL) {
                /*
                 * Reset timeout so it can be computed again, because
                 * s_li_wait_mult might have changed.
                 */
                sbi->s_li_request->lr_timeout = 0;
                goto out;
        }

        if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
            !test_opt(sb, INIT_INODE_TABLE))
                goto out;

        elr = ext4_li_request_new(sb, first_not_zeroed);
        if (!elr) {
                ret = -ENOMEM;
                goto out;
        }

        if (NULL == ext4_li_info) {
                ret = ext4_li_info_new();
                if (ret)
                        goto out;
        }

        mutex_lock(&ext4_li_info->li_list_mtx);
        list_add(&elr->lr_request, &ext4_li_info->li_request_list);
        mutex_unlock(&ext4_li_info->li_list_mtx);

        sbi->s_li_request = elr;
        /*
         * set elr to NULL here since it has been inserted into
         * the request_list and the removal and free of it is
         * handled by ext4_clear_request_list from now on.
         */
        elr = NULL;

        if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
                ret = ext4_run_lazyinit_thread();
                if (ret)
                        goto out;
        }
out:
        mutex_unlock(&ext4_li_mtx);
        if (ret)
                kfree(elr);
        return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
        /*
         * If thread exited earlier
         * there's nothing to be done.
         */
        if (!ext4_li_info || !ext4_lazyinit_task)
                return;

        kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
        int ret = 1;
        int compat, incompat;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (ext4_has_metadata_csum(sb)) {
                /* journal checksum v3 */
                compat = 0;
                incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
        } else {
                /* journal checksum v1 */
                compat = JBD2_FEATURE_COMPAT_CHECKSUM;
                incompat = 0;
        }

        jbd2_journal_clear_features(sbi->s_journal,
                        JBD2_FEATURE_COMPAT_CHECKSUM, 0,
                        JBD2_FEATURE_INCOMPAT_CSUM_V3 |
                        JBD2_FEATURE_INCOMPAT_CSUM_V2);
        if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
                ret = jbd2_journal_set_features(sbi->s_journal,
                                compat, 0,
                                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
                                incompat);
        } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
                ret = jbd2_journal_set_features(sbi->s_journal,
                                compat, 0,
                                incompat);
                jbd2_journal_clear_features(sbi->s_journal, 0, 0,
                                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
        } else {
                jbd2_journal_clear_features(sbi->s_journal, 0, 0,
                                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
        }

        return ret;
}
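
/*
 * Quick-reference summary of the mapping implemented above:
 *
 *   metadata_csum on fs      -> journal CSUM_V3, else COMPAT_CHECKSUM (v1)
 *   journal_async_commit     -> checksums plus INCOMPAT_ASYNC_COMMIT
 *   journal_checksum only    -> checksums, ASYNC_COMMIT cleared
 *   neither option           -> ASYNC_COMMIT cleared; the checksum bits
 *                               stay cleared from the
 *                               jbd2_journal_clear_features() call above
 */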

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block groups can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the overhead for
 * older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
                          char *buf)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp;
        ext4_fsblk_t first_block, last_block, b;
        ext4_group_t i, ngroups = ext4_get_groups_count(sb);
        int s, j, count = 0;

        if (!ext4_has_feature_bigalloc(sb))
                return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
                        sbi->s_itb_per_group + 2);

        first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
                (grp * EXT4_BLOCKS_PER_GROUP(sb));
        last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                b = ext4_block_bitmap(sb, gdp);
                if (b >= first_block && b <= last_block) {
                        ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
                        count++;
                }
                b = ext4_inode_bitmap(sb, gdp);
                if (b >= first_block && b <= last_block) {
                        ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
                        count++;
                }
                b = ext4_inode_table(sb, gdp);
                if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
                        for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
                                int c = EXT4_B2C(sbi, b - first_block);
                                ext4_set_bit(c, buf);
                                count++;
                        }
                if (i != grp)
                        continue;
                s = 0;
                if (ext4_bg_has_super(sb, grp)) {
                        ext4_set_bit(s++, buf);
                        count++;
                }
                j = ext4_bg_num_gdb(sb, grp);
                if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
                        ext4_error(sb, "Invalid number of block group "
                                   "descriptor blocks: %d", j);
                        j = EXT4_BLOCKS_PER_GROUP(sb) - s;
                }
                count += j;
                for (; j > 0; j--)
                        ext4_set_bit(EXT4_B2C(sbi, s++), buf);
        }
        if (!count)
                return 0;
        return EXT4_CLUSTERS_PER_GROUP(sb) -
                ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        struct inode *j_inode;
        unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
        ext4_group_t i, ngroups = ext4_get_groups_count(sb);
        ext4_fsblk_t overhead = 0;
        char *buf = (char *) get_zeroed_page(GFP_NOFS);

        if (!buf)
                return -ENOMEM;

        /*
         * Compute the overhead (FS structures).  This is constant
         * for a given filesystem unless the number of block groups
         * changes so we cache the previous value until it does.
         */

        /*
         * All of the blocks before first_data_block are overhead
         */
        overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

        /*
         * Add the overhead found in each block group
         */
        for (i = 0; i < ngroups; i++) {
                int blks;

                blks = count_overhead(sb, i, buf);
                overhead += blks;
                if (blks)
                        memset(buf, 0, PAGE_SIZE);
                cond_resched();
        }

        /*
         * Add the internal journal blocks whether the journal has been
         * loaded or not
         */
        if (sbi->s_journal && !sbi->journal_bdev)
                overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
        else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
                j_inode = ext4_get_journal_inode(sb, j_inum);
                if (j_inode) {
                        j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
                        overhead += EXT4_NUM_B2C(sbi, j_blocks);
                        iput(j_inode);
                } else {
                        ext4_msg(sb, KERN_ERR, "can't get journal size");
                }
        }
        sbi->s_overhead = overhead;
        smp_wmb();
        free_page((unsigned long) buf);
        return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
        ext4_fsblk_t resv_clusters;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        /*
         * There's no need to reserve anything when we aren't using extents.
         * The space estimates are exact, there are no unwritten extents,
         * hole punching doesn't need new metadata... This is needed especially
         * to keep ext2/3 backward compatibility.
         */
        if (!ext4_has_feature_extents(sb))
                return;
        /*
         * By default we reserve 2% or 4096 clusters, whichever is smaller.
         * This should cover the situations where we can not afford to run
         * out of space like for example punch hole, or converting
         * unwritten extents in delalloc path.  In most cases such
         * allocation would require 1, or 2 blocks, higher numbers are
         * very rare.
         */
        resv_clusters = (ext4_blocks_count(sbi->s_es) >>
                         sbi->s_cluster_bits);

        do_div(resv_clusters, 50);
        resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

        atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}
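
/*
 * Worked example (illustrative): a 1 TiB filesystem with 4 KiB clusters
 * has 2^28 clusters; one fiftieth of that is ~5.4 million, so the min_t()
 * cap wins and 4096 clusters (16 MiB) are reserved.  Only filesystems
 * smaller than ~800 MiB end up below the cap.
 */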

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
        struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
        char *orig_data = kstrdup(data, GFP_KERNEL);
        struct buffer_head *bh;
        struct ext4_super_block *es = NULL;
        struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        ext4_fsblk_t block;
        ext4_fsblk_t sb_block = get_sb_block(&data);
        ext4_fsblk_t logical_sb_block;
        unsigned long offset = 0;
        unsigned long journal_devnum = 0;
        unsigned long def_mount_opts;
        struct inode *root;
        const char *descr;
        int ret = -ENOMEM;
        int blocksize, clustersize;
        unsigned int db_count;
        unsigned int i;
        int needs_recovery, has_huge_files, has_bigalloc;
        __u64 blocks_count;
        int err = 0;
        unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
        ext4_group_t first_not_zeroed;

        if ((data && !orig_data) || !sbi)
                goto out_free_base;

        sbi->s_daxdev = dax_dev;
        sbi->s_blockgroup_lock =
                kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
        if (!sbi->s_blockgroup_lock)
                goto out_free_base;

        sb->s_fs_info = sbi;
        sbi->s_sb = sb;
        sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
        sbi->s_sb_block = sb_block;
        if (sb->s_bdev->bd_part)
                sbi->s_sectors_written_start =
                        part_stat_read(sb->s_bdev->bd_part, sectors[1]);

        /* Cleanup superblock name */
        strreplace(sb->s_id, '/', '!');

        /* -EINVAL is default */
        ret = -EINVAL;
        blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
        if (!blocksize) {
                ext4_msg(sb, KERN_ERR, "unable to set blocksize");
                goto out_fail;
        }

        /*
         * The ext4 superblock will not be buffer aligned for other than 1kB
         * block sizes.  We need to calculate the offset from buffer start.
         */
        if (blocksize != EXT4_MIN_BLOCK_SIZE) {
                logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
                offset = do_div(logical_sb_block, blocksize);
        } else {
                logical_sb_block = sb_block;
        }
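
        /*
         * Worked example (illustrative): with the default sb_block of 1
         * and a 4096-byte blocksize, logical_sb_block becomes
         * (1 * 1024) / 4096 = 0 with offset 1024, i.e. the superblock
         * lives 1 KiB into the device's first 4 KiB block.
         */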

        if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
                ext4_msg(sb, KERN_ERR, "unable to read superblock");
                goto out_fail;
        }
        /*
         * Note: s_es must be initialized as soon as possible because
         *       some ext4 macro-instructions depend on its value
         */
        es = (struct ext4_super_block *) (bh->b_data + offset);
        sbi->s_es = es;
        sb->s_magic = le16_to_cpu(es->s_magic);
        if (sb->s_magic != EXT4_SUPER_MAGIC)
                goto cantfind_ext4;
        sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

        /* Warn if metadata_csum and gdt_csum are both set. */
        if (ext4_has_feature_metadata_csum(sb) &&
            ext4_has_feature_gdt_csum(sb))
                ext4_warning(sb, "metadata_csum and uninit_bg are "
                             "redundant flags; please run fsck.");

        /* Check for a known checksum algorithm */
        if (!ext4_verify_csum_type(sb, es)) {
                ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
                         "unknown checksum algorithm.");
                silent = 1;
                goto cantfind_ext4;
        }

        /* Load the checksum driver */
        if (ext4_has_feature_metadata_csum(sb) ||
            ext4_has_feature_ea_inode(sb)) {
                sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
                if (IS_ERR(sbi->s_chksum_driver)) {
                        ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
                        ret = PTR_ERR(sbi->s_chksum_driver);
                        sbi->s_chksum_driver = NULL;
                        goto failed_mount;
                }
        }

        /* Check superblock checksum */
        if (!ext4_superblock_csum_verify(sb, es)) {
                ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
                         "invalid superblock checksum.  Run e2fsck?");
                silent = 1;
                ret = -EFSBADCRC;
                goto cantfind_ext4;
        }

        /* Precompute checksum seed for all metadata */
        if (ext4_has_feature_csum_seed(sb))
                sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
        else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
                sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
                                               sizeof(es->s_uuid));

        /* Set defaults before we parse the mount options */
        def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
        set_opt(sb, INIT_INODE_TABLE);
        if (def_mount_opts & EXT4_DEFM_DEBUG)
                set_opt(sb, DEBUG);
        if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
                set_opt(sb, GRPID);
        if (def_mount_opts & EXT4_DEFM_UID16)
                set_opt(sb, NO_UID32);
        /* xattr user namespace & acls are now defaulted on */
        set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
        set_opt(sb, POSIX_ACL);
#endif
        /* don't forget to enable journal_csum when metadata_csum is enabled. */
        if (ext4_has_metadata_csum(sb))
                set_opt(sb, JOURNAL_CHECKSUM);

        if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
                set_opt(sb, JOURNAL_DATA);
        else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
                set_opt(sb, ORDERED_DATA);
        else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
                set_opt(sb, WRITEBACK_DATA);

        if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
                set_opt(sb, ERRORS_PANIC);
        else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
                set_opt(sb, ERRORS_CONT);
        else
                set_opt(sb, ERRORS_RO);
        /* block_validity enabled by default; disable with noblock_validity */
        set_opt(sb, BLOCK_VALIDITY);
        if (def_mount_opts & EXT4_DEFM_DISCARD)
                set_opt(sb, DISCARD);

        sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
        sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
        sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
        sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
        sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

        if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
                set_opt(sb, BARRIER);

        /*
         * enable delayed allocation by default
         * Use -o nodelalloc to turn it off
         */
        if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
            ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
                set_opt(sb, DELALLOC);

        /*
         * set default s_li_wait_mult for lazyinit, for the case there is
         * no mount option specified.
         */
        sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

        if (sbi->s_es->s_mount_opts[0]) {
                char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
                                              sizeof(sbi->s_es->s_mount_opts),
                                              GFP_KERNEL);
                if (!s_mount_opts)
                        goto failed_mount;
                if (!parse_options(s_mount_opts, sb, &journal_devnum,
                                   &journal_ioprio, 0)) {
                        ext4_msg(sb, KERN_WARNING,
                                 "failed to parse options in superblock: %s",
                                 s_mount_opts);
                }
                kfree(s_mount_opts);
        }
        sbi->s_def_mount_opt = sbi->s_mount_opt;
        if (!parse_options((char *) data, sb, &journal_devnum,
                           &journal_ioprio, 0))
                goto failed_mount;

        if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
                printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
                            "with data=journal disables delayed "
                            "allocation and O_DIRECT support!\n");
                if (test_opt2(sb, EXPLICIT_DELALLOC)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "both data=journal and delalloc");
                        goto failed_mount;
                }
                if (test_opt(sb, DIOREAD_NOLOCK)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "both data=journal and dioread_nolock");
                        goto failed_mount;
                }
                if (test_opt(sb, DAX)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "both data=journal and dax");
                        goto failed_mount;
                }
                if (ext4_has_feature_encrypt(sb)) {
                        ext4_msg(sb, KERN_WARNING,
                                 "encrypted files will use data=ordered "
                                 "instead of data journaling mode");
                }
                if (test_opt(sb, DELALLOC))
                        clear_opt(sb, DELALLOC);
        } else {
                sb->s_iflags |= SB_I_CGROUPWB;
        }

        sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
                (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

        if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
            (ext4_has_compat_features(sb) ||
             ext4_has_ro_compat_features(sb) ||
             ext4_has_incompat_features(sb)))
                ext4_msg(sb, KERN_WARNING,
                         "feature flags set on rev 0 fs, "
                         "running e2fsck is recommended");

        if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
                set_opt2(sb, HURD_COMPAT);
                if (ext4_has_feature_64bit(sb)) {
                        ext4_msg(sb, KERN_ERR,
                                 "The Hurd can't support 64-bit file systems");
                        goto failed_mount;
                }

                /*
                 * ea_inode feature uses l_i_version field which is not
                 * available in HURD_COMPAT mode.
                 */
                if (ext4_has_feature_ea_inode(sb)) {
                        ext4_msg(sb, KERN_ERR,
                                 "ea_inode feature is not supported for Hurd");
                        goto failed_mount;
                }
        }

        if (IS_EXT2_SB(sb)) {
                if (ext2_feature_set_ok(sb))
                        ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
                                 "using the ext4 subsystem");
                else {
                        ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
                                 "to feature incompatibilities");
                        goto failed_mount;
                }
        }

        if (IS_EXT3_SB(sb)) {
                if (ext3_feature_set_ok(sb))
                        ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
                                 "using the ext4 subsystem");
                else {
                        ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
                                 "to feature incompatibilities");
                        goto failed_mount;
                }
        }

        /*
         * Check feature flags regardless of the revision level, since we
         * previously didn't change the revision level when setting the flags,
         * so there is a chance incompat flags are set on a rev 0 filesystem.
         */
        if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
                goto failed_mount;

        blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
        if (blocksize < EXT4_MIN_BLOCK_SIZE ||
            blocksize > EXT4_MAX_BLOCK_SIZE) {
                ext4_msg(sb, KERN_ERR,
                         "Unsupported filesystem blocksize %d (%d log_block_size)",
                         blocksize, le32_to_cpu(es->s_log_block_size));
                goto failed_mount;
        }
        if (le32_to_cpu(es->s_log_block_size) >
            (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
                ext4_msg(sb, KERN_ERR,
                         "Invalid log block size: %u",
                         le32_to_cpu(es->s_log_block_size));
                goto failed_mount;
        }

        if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
                ext4_msg(sb, KERN_ERR,
                         "Number of reserved GDT blocks insanely large: %d",
                         le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
                goto failed_mount;
        }

        if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
                if (ext4_has_feature_inline_data(sb)) {
                        ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
                                 " that may contain inline data");
                        sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
                }
                err = bdev_dax_supported(sb, blocksize);
                if (err) {
                        ext4_msg(sb, KERN_ERR,
                                 "DAX unsupported by block device. Turning off DAX.");
                        sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
                }
        }

        if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
                ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
                         es->s_encryption_level);
                goto failed_mount;
        }

        if (sb->s_blocksize != blocksize) {
                /* Validate the filesystem blocksize */
                if (!sb_set_blocksize(sb, blocksize)) {
                        ext4_msg(sb, KERN_ERR, "bad block size %d",
                                 blocksize);
                        goto failed_mount;
                }

                brelse(bh);
                logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
                offset = do_div(logical_sb_block, blocksize);
                bh = sb_bread_unmovable(sb, logical_sb_block);
                if (!bh) {
                        ext4_msg(sb, KERN_ERR,
                                 "Can't read superblock on 2nd try");
                        goto failed_mount;
                }
                es = (struct ext4_super_block *)(bh->b_data + offset);
                sbi->s_es = es;
                if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Magic mismatch, very weird!");
                        goto failed_mount;
                }
        }

        has_huge_files = ext4_has_feature_huge_file(sb);
        sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
                                                      has_huge_files);
        sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

        if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
                sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
                sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
        } else {
                sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
                sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
                if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
                    (!is_power_of_2(sbi->s_inode_size)) ||
                    (sbi->s_inode_size > blocksize)) {
                        ext4_msg(sb, KERN_ERR,
                                 "unsupported inode size: %d",
                                 sbi->s_inode_size);
                        goto failed_mount;
                }
                if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
                        sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
        }

        sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
        if (ext4_has_feature_64bit(sb)) {
                if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
                    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
                    !is_power_of_2(sbi->s_desc_size)) {
                        ext4_msg(sb, KERN_ERR,
                                 "unsupported descriptor size %lu",
                                 sbi->s_desc_size);
                        goto failed_mount;
                }
        } else
                sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

        sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
        sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

        sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
        if (sbi->s_inodes_per_block == 0)
                goto cantfind_ext4;
        if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
            sbi->s_inodes_per_group > blocksize * 8) {
                ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu",
                         sbi->s_inodes_per_group);
                goto failed_mount;
        }
        sbi->s_itb_per_group = sbi->s_inodes_per_group /
                                        sbi->s_inodes_per_block;
        sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
        sbi->s_sbh = bh;
        sbi->s_mount_state = le16_to_cpu(es->s_state);
        sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
        sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

        for (i = 0; i < 4; i++)
                sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
        sbi->s_def_hash_version = es->s_def_hash_version;
        if (ext4_has_feature_dir_index(sb)) {
                i = le32_to_cpu(es->s_flags);
                if (i & EXT2_FLAGS_UNSIGNED_HASH)
                        sbi->s_hash_unsigned = 3;
                else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
                        if (!sb_rdonly(sb))
                                es->s_flags |=
                                        cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
                        sbi->s_hash_unsigned = 3;
#else
                        if (!sb_rdonly(sb))
                                es->s_flags |=
                                        cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
                }
        }

        /* Handle clustersize */
        clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
        has_bigalloc = ext4_has_feature_bigalloc(sb);
        if (has_bigalloc) {
                if (clustersize < blocksize) {
                        ext4_msg(sb, KERN_ERR,
                                 "cluster size (%d) smaller than "
                                 "block size (%d)", clustersize, blocksize);
                        goto failed_mount;
                }
                if (le32_to_cpu(es->s_log_cluster_size) >
                    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Invalid log cluster size: %u",
                                 le32_to_cpu(es->s_log_cluster_size));
                        goto failed_mount;
                }
                sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
                        le32_to_cpu(es->s_log_block_size);
                sbi->s_clusters_per_group =
                        le32_to_cpu(es->s_clusters_per_group);
                if (sbi->s_clusters_per_group > blocksize * 8) {
                        ext4_msg(sb, KERN_ERR,
                                 "#clusters per group too big: %lu",
                                 sbi->s_clusters_per_group);
                        goto failed_mount;
                }
                if (sbi->s_blocks_per_group !=
                    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
                        ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
                                 "clusters per group (%lu) inconsistent",
                                 sbi->s_blocks_per_group,
                                 sbi->s_clusters_per_group);
                        goto failed_mount;
                }
        } else {
                if (clustersize != blocksize) {
                        ext4_warning(sb, "fragment/cluster size (%d) != "
                                     "block size (%d)", clustersize,
                                     blocksize);
                        clustersize = blocksize;
                }
                if (sbi->s_blocks_per_group > blocksize * 8) {
                        ext4_msg(sb, KERN_ERR,
                                 "#blocks per group too big: %lu",
                                 sbi->s_blocks_per_group);
                        goto failed_mount;
                }
                sbi->s_clusters_per_group = sbi->s_blocks_per_group;
                sbi->s_cluster_bits = 0;
        }
        sbi->s_cluster_ratio = clustersize / blocksize;

        /* Do we have standard group size of clustersize * 8 blocks ? */
        if (sbi->s_blocks_per_group == clustersize << 3)
                set_opt2(sb, STD_GROUP_SIZE);
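
        /*
         * Worked example (illustrative): a bigalloc filesystem made with
         * 4 KiB blocks and 64 KiB clusters (e.g. mkfs.ext4 -O bigalloc
         * -b 4096 -C 65536) has s_log_block_size == 2 and
         * s_log_cluster_size == 6, so s_cluster_bits = 4 and
         * s_cluster_ratio = 16: every allocation unit covers 16 blocks.
         * Without bigalloc the two sizes must match and the ratio is 1.
         */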

        /*
         * Test whether we have more sectors than will fit in sector_t,
         * and whether the max offset is addressable by the page cache.
         */
        err = generic_check_addressable(sb->s_blocksize_bits,
                                        ext4_blocks_count(es));
        if (err) {
                ext4_msg(sb, KERN_ERR, "filesystem"
                         " too large to mount safely on this system");
                if (sizeof(sector_t) < 8)
                        ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
                goto failed_mount;
        }

        if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
                goto cantfind_ext4;

        /* check blocks count against device size */
        blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
        if (blocks_count && ext4_blocks_count(es) > blocks_count) {
                ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
                         "exceeds size of device (%llu blocks)",
                         ext4_blocks_count(es), blocks_count);
                goto failed_mount;
        }

        /*
         * It makes no sense for the first data block to be beyond the end
         * of the filesystem.
         */
        if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
                ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
                         "block %u is beyond end of filesystem (%llu)",
                         le32_to_cpu(es->s_first_data_block),
                         ext4_blocks_count(es));
                goto failed_mount;
        }
        blocks_count = (ext4_blocks_count(es) -
                        le32_to_cpu(es->s_first_data_block) +
                        EXT4_BLOCKS_PER_GROUP(sb) - 1);
        do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
        if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
                ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
                         "(block count %llu, first data block %u, "
                         "blocks per group %lu)", sbi->s_groups_count,
                         ext4_blocks_count(es),
                         le32_to_cpu(es->s_first_data_block),
                         EXT4_BLOCKS_PER_GROUP(sb));
                goto failed_mount;
        }
        sbi->s_groups_count = blocks_count;
        sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
                        (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
        db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
                   EXT4_DESC_PER_BLOCK(sb);
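
        /*
         * Worked example (illustrative): a 1 TiB filesystem with 4 KiB
         * blocks has 2^28 blocks and 32768 blocks per group, i.e. 8192
         * groups.  With 64-byte descriptors EXT4_DESC_PER_BLOCK() is 64,
         * so db_count rounds up to 8192 / 64 = 128 descriptor blocks.
         */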

        if (ext4_has_feature_meta_bg(sb)) {
                if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
                        ext4_msg(sb, KERN_WARNING,
                                 "first meta block group too large: %u "
                                 "(group descriptor block count %u)",
                                 le32_to_cpu(es->s_first_meta_bg), db_count);
                        goto failed_mount;
                }
        }
        sbi->s_group_desc = kvmalloc(db_count *
                                     sizeof(struct buffer_head *),
                                     GFP_KERNEL);
        if (sbi->s_group_desc == NULL) {
                ext4_msg(sb, KERN_ERR, "not enough memory");
                ret = -ENOMEM;
                goto failed_mount;
        }

        bgl_lock_init(sbi->s_blockgroup_lock);

        /* Pre-read the descriptors into the buffer cache */
        for (i = 0; i < db_count; i++) {
                block = descriptor_loc(sb, logical_sb_block, i);
                sb_breadahead(sb, block);
        }

        for (i = 0; i < db_count; i++) {
                block = descriptor_loc(sb, logical_sb_block, i);
                sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
                if (!sbi->s_group_desc[i]) {
                        ext4_msg(sb, KERN_ERR,
                                 "can't read group descriptor %d", i);
                        db_count = i;
                        goto failed_mount2;
                }
        }
        if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
                ret = -EFSCORRUPTED;
                goto failed_mount2;
        }

        sbi->s_gdb_count = db_count;

        timer_setup(&sbi->s_err_report, print_daily_error_info, 0);

        /* Register extent status tree shrinker */
        if (ext4_es_register_shrinker(sbi))
                goto failed_mount3;

        sbi->s_stripe = ext4_get_stripe_size(sbi);
        sbi->s_extent_max_zeroout_kb = 32;

        /*
         * set up enough so that it can read an inode
         */
        sb->s_op = &ext4_sops;
        sb->s_export_op = &ext4_export_ops;
        sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
        sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA
        sb->dq_op = &ext4_quota_operations;
        if (ext4_has_feature_quota(sb))
                sb->s_qcop = &dquot_quotactl_sysfile_ops;
        else
                sb->s_qcop = &ext4_qctl_operations;
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
        memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

        INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
        mutex_init(&sbi->s_orphan_lock);

        sb->s_root = NULL;

        needs_recovery = (es->s_last_orphan != 0 ||
                          ext4_has_feature_journal_needs_recovery(sb));

        if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
                if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
                        goto failed_mount3a;

        /*
         * The first inode we look at is the journal inode.  Don't try
         * root first: it may be modified in the journal!
         */
        if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
                err = ext4_load_journal(sb, es, journal_devnum);
                if (err)
                        goto failed_mount3a;
        } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
                   ext4_has_feature_journal_needs_recovery(sb)) {
                ext4_msg(sb, KERN_ERR, "required journal recovery "
                         "suppressed and not mounted read-only");
                goto failed_mount_wq;
        } else {
                /* Nojournal mode, all journal mount options are illegal */
                if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "journal_checksum, fs mounted w/o journal");
                        goto failed_mount_wq;
                }
                if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "journal_async_commit, fs mounted w/o journal");
                        goto failed_mount_wq;
                }
                if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "commit=%lu, fs mounted w/o journal",
                                 sbi->s_commit_interval / HZ);
                        goto failed_mount_wq;
                }
                if (EXT4_MOUNT_DATA_FLAGS &
                    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "data=, fs mounted w/o journal");
                        goto failed_mount_wq;
                }
                sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
                clear_opt(sb, JOURNAL_CHECKSUM);
                clear_opt(sb, DATA_FLAGS);
                sbi->s_journal = NULL;
                needs_recovery = 0;
                goto no_journal;
        }

        if (ext4_has_feature_64bit(sb) &&
            !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
                                       JBD2_FEATURE_INCOMPAT_64BIT)) {
                ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
                goto failed_mount_wq;
        }

        if (!set_journal_csum_feature_set(sb)) {
                ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
                         "feature set");
                goto failed_mount_wq;
        }

        /* We have now updated the journal if required, so we can
         * validate the data journaling mode. */
        switch (test_opt(sb, DATA_FLAGS)) {
        case 0:
                /* No mode set, assume a default based on the journal
                 * capabilities: ORDERED_DATA if the journal can
                 * cope, else JOURNAL_DATA
                 */
                if (jbd2_journal_check_available_features
                    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
                        set_opt(sb, ORDERED_DATA);
                else
                        set_opt(sb, JOURNAL_DATA);
                break;

        case EXT4_MOUNT_ORDERED_DATA:
        case EXT4_MOUNT_WRITEBACK_DATA:
                if (!jbd2_journal_check_available_features
                    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
                        ext4_msg(sb, KERN_ERR, "Journal does not support "
                                 "requested data journaling mode");
                        goto failed_mount_wq;
                }
        default:
                break;
        }

        if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
            test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
                ext4_msg(sb, KERN_ERR, "can't mount with "
                         "journal_async_commit in data=ordered mode");
                goto failed_mount_wq;
        }

        set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

        sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
        if (!test_opt(sb, NO_MBCACHE)) {
                sbi->s_ea_block_cache = ext4_xattr_create_cache();
                if (!sbi->s_ea_block_cache) {
                        ext4_msg(sb, KERN_ERR,
                                 "Failed to create ea_block_cache");
                        goto failed_mount_wq;
                }

                if (ext4_has_feature_ea_inode(sb)) {
                        sbi->s_ea_inode_cache = ext4_xattr_create_cache();
                        if (!sbi->s_ea_inode_cache) {
                                ext4_msg(sb, KERN_ERR,
                                         "Failed to create ea_inode_cache");
                                goto failed_mount_wq;
                        }
                }
        }

        if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
            (blocksize != PAGE_SIZE)) {
                ext4_msg(sb, KERN_ERR,
                         "Unsupported blocksize for fs encryption");
                goto failed_mount_wq;
        }

        if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
            !ext4_has_feature_encrypt(sb)) {
                ext4_set_feature_encrypt(sb);
                ext4_commit_super(sb, 1);
        }

        /*
         * Get the # of file system overhead blocks from the
         * superblock if present.
         */
        if (es->s_overhead_clusters)
                sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
        else {
                err = ext4_calculate_overhead(sb);
                if (err)
                        goto failed_mount_wq;
        }

        /*
         * The maximum number of concurrent works can be high and
         * concurrency isn't really necessary.  Limit it to 1.
         */
        EXT4_SB(sb)->rsv_conversion_wq =
                alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!EXT4_SB(sb)->rsv_conversion_wq) {
                printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
                ret = -ENOMEM;
                goto failed_mount4;
        }

        /*
         * The jbd2_journal_load will have done any necessary log recovery,
         * so we can safely mount the rest of the filesystem now.
         */

        root = ext4_iget(sb, EXT4_ROOT_INO);
        if (IS_ERR(root)) {
                ext4_msg(sb, KERN_ERR, "get root inode failed");
                ret = PTR_ERR(root);
                root = NULL;
                goto failed_mount4;
        }
        if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
                ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
                iput(root);
                goto failed_mount4;
        }
        sb->s_root = d_make_root(root);
        if (!sb->s_root) {
                ext4_msg(sb, KERN_ERR, "get root dentry failed");
                ret = -ENOMEM;
                goto failed_mount4;
        }

        if (ext4_setup_super(sb, es, sb_rdonly(sb)))
                sb->s_flags |= SB_RDONLY;

        /* determine the minimum size of new large inodes, if present */
        if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
            sbi->s_want_extra_isize == 0) {
                sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
                                                     EXT4_GOOD_OLD_INODE_SIZE;
                if (ext4_has_feature_extra_isize(sb)) {
                        if (sbi->s_want_extra_isize <
                            le16_to_cpu(es->s_want_extra_isize))
                                sbi->s_want_extra_isize =
                                        le16_to_cpu(es->s_want_extra_isize);
                        if (sbi->s_want_extra_isize <
                            le16_to_cpu(es->s_min_extra_isize))
                                sbi->s_want_extra_isize =
                                        le16_to_cpu(es->s_min_extra_isize);
                }
        }
        /* Check if enough inode space is available */
        if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
                                                        sbi->s_inode_size) {
                sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
                                                       EXT4_GOOD_OLD_INODE_SIZE;
                ext4_msg(sb, KERN_INFO, "required extra inode space not "
                         "available");
        }
  3799. ext4_set_resv_clusters(sb);
  3800. err = ext4_setup_system_zone(sb);
  3801. if (err) {
  3802. ext4_msg(sb, KERN_ERR, "failed to initialize system "
  3803. "zone (%d)", err);
  3804. goto failed_mount4a;
  3805. }
  3806. ext4_ext_init(sb);
  3807. err = ext4_mb_init(sb);
  3808. if (err) {
  3809. ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
  3810. err);
  3811. goto failed_mount5;
  3812. }
  3813. block = ext4_count_free_clusters(sb);
  3814. ext4_free_blocks_count_set(sbi->s_es,
  3815. EXT4_C2B(sbi, block));
  3816. err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
  3817. GFP_KERNEL);
  3818. if (!err) {
  3819. unsigned long freei = ext4_count_free_inodes(sb);
  3820. sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
  3821. err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
  3822. GFP_KERNEL);
  3823. }
  3824. if (!err)
  3825. err = percpu_counter_init(&sbi->s_dirs_counter,
  3826. ext4_count_dirs(sb), GFP_KERNEL);
  3827. if (!err)
  3828. err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
  3829. GFP_KERNEL);
  3830. if (!err)
  3831. err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
  3832. if (err) {
  3833. ext4_msg(sb, KERN_ERR, "insufficient memory");
  3834. goto failed_mount6;
  3835. }
	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

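/*
 * Open the journal that lives in an inode of this filesystem and bind it
 * to the jbd2 layer.  Returns NULL on failure; the journal inode
 * reference is dropped on every error path.
 */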
static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

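/*
 * Open a journal kept on an external block device.  The device must
 * carry an ext4 superblock with the JOURNAL_DEV incompat feature, a
 * valid checksum if metadata_csum is in use, a UUID matching
 * s_journal_uuid of the filesystem being mounted, and exactly one
 * registered user.
 */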
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

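/*
 * Locate the journal (inode-based or external device), replay it if the
 * filesystem needs recovery, and attach it to the superblock.  Because
 * replaying the journal can rewrite the on-disk superblock, the saved
 * error-report region (EXT4_S_ERR_START..+EXT4_S_ERR_LEN) is preserved
 * across jbd2_journal_load().
 */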
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
		       "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}

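/*
 * Write the in-memory superblock back to disk, refreshing the write
 * time, the lifetime kilobytes-written statistic and the free
 * block/inode counts first.  If @sync is set, the write is issued
 * synchronously (with REQ_FUA when barriers are enabled) and any
 * resulting I/O error is reported.
 */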
static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (error)
			return error;

		error = buffer_write_io_error(sbh);
		if (error) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

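/*
 * ->sync_fs callback: flush pending reserved-extent conversions and
 * non-journalled quota, commit the running transaction (waiting for it
 * if @wait is set), and issue a cache flush to the device whenever no
 * transaction commit will send one for us.
 */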
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * be sent at the end of the function.  But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem into a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

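/*
 * Remount handler: re-parse the mount options, ignore or reject option
 * changes that cannot take effect at remount time (journal_checksum,
 * nombcache, dax), handle the ro<->rw transitions, and restore the saved
 * options from ext4_mount_options on any failure.
 */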
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				"journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			"dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & SB_LAZYTIME)
		sb->s_flags |= SB_LAZYTIME;

	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
						 "ext4_remount: Checksum for group %u failed (%u!=%u)",
						 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
						 le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
				       "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if (!ext4_setup_super(sb, es, 0))
				sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY))
		ext4_commit_super(sb, 1);

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}

#ifdef CONFIG_QUOTA
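/*
 * Clamp statfs figures to the project quota limits when the queried
 * inode inherits a project id: total blocks and inodes are reduced to
 * the soft limit (or the hard limit if no soft limit is set), and the
 * free counts to whatever remains below that limit.
 */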
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

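/*
 * ->statfs callback.  Free counts come from the percpu counters (with
 * delalloc-reserved clusters subtracted), the filesystem overhead is
 * excluded from the total unless minixdf is set, and f_fsid is derived
 * from the on-disk UUID.
 */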
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}

#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}

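/*
 * Enable one quota type backed by a hidden quota inode; the inode number
 * comes from the s_usr/grp/prj_quota_inum superblock fields.  Used when
 * the "quota" feature is set, so no visible quota files exist.
 */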
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				return err;
			}
		}
	}
	return 0;
}

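/*
 * Disable quota for one type.  For visible quota files, the NOATIME and
 * IMMUTABLE flags set by ext4_quota_on() are cleared again and the file
 * mtime/ctime are refreshed so userspace sees current timestamps.
 */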
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account for only one data block in the transaction credits,
	 * the write must not cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}

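/*
 * Advance to the next allocated quota id; returns -ENOSYS when the
 * loaded quota format does not implement get_next_id.
 */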
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops *ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

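/*
 * ->mount entry point: hand off to the generic block-device mount
 * helper, with ext4_fill_super doing the filesystem-specific work.
 */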
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

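/*
 * When CONFIG_EXT4_USE_FOR_EXT2 is set and no separate ext2 driver is
 * built, ext4 also registers itself as "ext2" so that such filesystems
 * can still be mounted by that name; ext2_feature_set_ok() refuses
 * feature sets the ext2 on-disk format cannot support.
 */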
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

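/*
 * Likewise, ext4 always offers itself under the "ext3" name;
 * ext3_feature_set_ok() additionally requires a journal, which ext3
 * mounts expect.
 */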
static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

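/*
 * Module init: bring up the subsystems (extent status cache, pageio,
 * system zone, sysfs, mballoc, inode cache) in order, register the ext2
 * and ext3 compatibility aliases, and finally register the ext4
 * filesystem type.  The out* labels unwind in exact reverse order.
 */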
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();
	return err;
}

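/*
 * Module exit: stop the lazy itable init thread and tear everything
 * down in the reverse order of ext4_init_fs().
 */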
static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)