/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC] = "kmalloc",
	[FAULT_PAGE_ALLOC] = "page alloc",
	[FAULT_PAGE_GET] = "page get",
	[FAULT_ALLOC_BIO] = "alloc bio",
	[FAULT_ALLOC_NID] = "alloc nid",
	[FAULT_ORPHAN] = "orphan",
	[FAULT_BLOCK] = "no more block",
	[FAULT_DIR_DEPTH] = "too big dir depth",
	[FAULT_EVICT_INODE] = "evict_inode fail",
	[FAULT_TRUNCATE] = "truncate fail",
	[FAULT_IO] = "IO error",
	[FAULT_CHECKPOINT] = "checkpoint error",
};

static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_err, NULL},
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sb)) {
		f2fs_msg(sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -EINVAL;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
				QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -EINVAL;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			"Cannot enable project quota enforcement.");
		return -1;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
			sbi->s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!sbi->s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"QUOTA feature is enabled, so ignore jquota_fmt");
		sbi->s_jquota_fmt = 0;
	}
	if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"Filesystem with quota feature cannot be mounted RDWR "
			"without CONFIG_QUOTA");
		return -1;
	}
	return 0;
}
#endif
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			sbi->inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			sbi->write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			sbi->s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			sbi->s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			sbi->s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_msg(sb, KERN_ERR,
				"inline_xattr_size option should be "
				"set with inline_xattr option");
			return -EINVAL;
		}
		if (!sbi->inline_xattr_size ||
			sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE -
				F2FS_TOTAL_EXTRA_ATTR_SIZE -
				DEF_INLINE_RESERVED_SIZE -
				DEF_MIN_INLINE_SIZE) {
			f2fs_msg(sb, KERN_ERR,
				"inline xattr size is out of range");
			return -EINVAL;
		}
	}
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);
#ifdef CONFIG_QUOTA
	memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
	fi->i_reserved_quota = 0;
#endif
	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	int ret;

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid calling evict_inode simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* fi->extent_tree should be kept for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in the cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);

	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		dquot->dq_dqb.dqb_bsoftlimit :
		dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			(buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			(buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
#endif
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}

static inline void f2fs_show_quota_options(struct seq_file *seq,
					struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);

	if (sbi->s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
#endif
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					sbi->inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				sbi->fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);

	return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;
	sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}

#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[MAXQUOTAS];
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;
	active_logs = sbi->active_logs;

#ifdef CONFIG_QUOTA
	s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sbi->s_qf_names[i]) {
			s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							GFP_KERNEL);
			if (!s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * If both the previous and new state of the filesystem are RO,
	 * skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sb)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enabling/disabling extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc=off is passed in the mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue-flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	sb->s_flags = old_sb_flags;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
	return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_mapping_page(mapping, blkidx, NULL);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto repeat;
			}
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
  1327. static struct dquot **f2fs_get_dquots(struct inode *inode)
  1328. {
  1329. return F2FS_I(inode)->i_dquot;
  1330. }
  1331. static qsize_t *f2fs_get_reserved_space(struct inode *inode)
  1332. {
  1333. return &F2FS_I(inode)->i_reserved_quota;
  1334. }
  1335. static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
  1336. {
  1337. return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
  1338. sbi->s_jquota_fmt, type);
  1339. }
  1340. int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
  1341. {
  1342. int enabled = 0;
  1343. int i, err;
  1344. if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
  1345. err = f2fs_enable_quotas(sbi->sb);
  1346. if (err) {
  1347. f2fs_msg(sbi->sb, KERN_ERR,
  1348. "Cannot turn on quota_ino: %d", err);
  1349. return 0;
  1350. }
  1351. return 1;
  1352. }
  1353. for (i = 0; i < MAXQUOTAS; i++) {
  1354. if (sbi->s_qf_names[i]) {
  1355. err = f2fs_quota_on_mount(sbi, i);
  1356. if (!err) {
  1357. enabled = 1;
  1358. continue;
  1359. }
  1360. f2fs_msg(sbi->sb, KERN_ERR,
  1361. "Cannot turn on quotas: %d on %d", err, i);
  1362. }
  1363. }
  1364. return enabled;
  1365. }
  1366. static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
  1367. unsigned int flags)
  1368. {
  1369. struct inode *qf_inode;
  1370. unsigned long qf_inum;
  1371. int err;
  1372. BUG_ON(!f2fs_sb_has_quota_ino(sb));
  1373. qf_inum = f2fs_qf_ino(sb, type);
  1374. if (!qf_inum)
  1375. return -EPERM;
  1376. qf_inode = f2fs_iget(sb, qf_inum);
  1377. if (IS_ERR(qf_inode)) {
  1378. f2fs_msg(sb, KERN_ERR,
  1379. "Bad quota inode %u:%lu", type, qf_inum);
  1380. return PTR_ERR(qf_inode);
  1381. }
  1382. /* Don't account quota for quota files to avoid recursion */
  1383. qf_inode->i_flags |= S_NOQUOTA;
  1384. err = dquot_enable(qf_inode, type, format_id, flags);
  1385. iput(qf_inode);
  1386. return err;
  1387. }
  1388. static int f2fs_enable_quotas(struct super_block *sb)
  1389. {
  1390. int type, err = 0;
  1391. unsigned long qf_inum;
  1392. bool quota_mopt[MAXQUOTAS] = {
  1393. test_opt(F2FS_SB(sb), USRQUOTA),
  1394. test_opt(F2FS_SB(sb), GRPQUOTA),
  1395. test_opt(F2FS_SB(sb), PRJQUOTA),
  1396. };
  1397. sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
  1398. for (type = 0; type < MAXQUOTAS; type++) {
  1399. qf_inum = f2fs_qf_ino(sb, type);
  1400. if (qf_inum) {
  1401. err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
  1402. DQUOT_USAGE_ENABLED |
  1403. (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
  1404. if (err) {
  1405. f2fs_msg(sb, KERN_ERR,
  1406. "Failed to enable quota tracking "
  1407. "(type=%d, err=%d). Please run "
  1408. "fsck to fix.", type, err);
  1409. for (type--; type >= 0; type--)
  1410. dquot_quota_off(sb, type);
  1411. return err;
  1412. }
  1413. }
  1414. }
  1415. return 0;
  1416. }
static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
		if (ret)
			return ret;

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
	return 0;
}
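
/*
 * ->quota_on: sync any pending quota updates first, then enable quota on the
 * user-supplied quota file.  The quota file is marked NOATIME and IMMUTABLE
 * so that ordinary writes cannot corrupt it while quota is active.
 */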
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
					S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
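
/*
 * ->quota_off: flush and disable quota for one type.  When quota lives in a
 * regular file (no quota_ino feature), also clear the NOATIME/IMMUTABLE flags
 * that f2fs_quota_on() set on it.
 */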
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	f2fs_quota_sync(sb, type);

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(sb))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		f2fs_quota_off(sb, type);
}

int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}

static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot = dquot_commit,
	.acquire_dquot = dquot_acquire,
	.release_dquot = dquot_release,
	.mark_dirty = dquot_mark_dquot_dirty,
	.write_info = dquot_commit_info,
	.alloc_dquot = dquot_alloc,
	.destroy_dquot = dquot_destroy,
	.get_projid = f2fs_get_projid,
	.get_next_id = dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on = f2fs_quota_on,
	.quota_off = f2fs_quota_off,
	.quota_sync = f2fs_quota_sync,
	.get_state = dquot_get_state,
	.set_info = dquot_set_dqinfo,
	.get_dqblk = dquot_get_dqblk,
	.set_dqblk = dquot_set_dqblk,
	.get_nextdqblk = dquot_get_next_dqblk,
};
#else
void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif

static const struct super_operations f2fs_sops = {
	.alloc_inode = f2fs_alloc_inode,
	.drop_inode = f2fs_drop_inode,
	.destroy_inode = f2fs_destroy_inode,
	.write_inode = f2fs_write_inode,
	.dirty_inode = f2fs_dirty_inode,
	.show_options = f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read = f2fs_quota_read,
	.quota_write = f2fs_quota_write,
	.get_dquots = f2fs_get_dquots,
#endif
	.evict_inode = f2fs_evict_inode,
	.put_super = f2fs_put_super,
	.sync_fs = f2fs_sync_fs,
	.freeze_fs = f2fs_freeze,
	.unfreeze_fs = f2fs_unfreeze,
	.statfs = f2fs_statfs,
	.remount_fs = f2fs_remount,
};
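
/*
 * fscrypt hooks: the per-inode encryption context is stored as an
 * F2FS_XATTR_INDEX_ENCRYPTION xattr and is set with XATTR_CREATE so that an
 * existing context is never overwritten.
 */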
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix = "f2fs:",
	.get_context = f2fs_get_context,
	.set_context = f2fs_set_context,
	.empty_dir = f2fs_empty_dir,
	.max_namelen = f2fs_max_namelen,
};
#endif

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
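
/*
 * Maximum file size in blocks.  The in-inode direct pointers are deliberately
 * not counted (i_addr space may be reserved for inline xattrs); only the two
 * direct node blocks, the two indirect node blocks and the one double-indirect
 * node block contribute.  Assuming the usual 4KB-block constants from f2fs.h
 * (ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018), this works out to roughly a
 * billion blocks, i.e. just under 4 TiB.
 */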
static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but since f2fs now tries to reserve
	 * more space in inode.i_addr, it is safer to start result at zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}

static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare path, so we can afford to do FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
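
/*
 * Cross-check the CP/SIT/NAT/SSA/MAIN area start addresses and segment counts
 * recorded in the raw superblock: each area must end exactly where the next
 * one begins.  Returns true if the layout is inconsistent.  A MAIN area that
 * ends short of the device is tolerated; the superblock's segment_count is
 * trimmed to match and written back when the device is writable.
 */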
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
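
/*
 * Basic validation of one raw superblock copy: magic number, 4KB page and
 * block size, segment geometry, sector size, reserved inode numbers and total
 * segment count, followed by the area-boundary check above.  Returns non-zero
 * if this copy cannot be trusted.
 */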
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)\n",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			le32_to_cpu(raw_super->segment_count));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
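
/*
 * Validate the checkpoint against the superblock: metadata segments must not
 * exceed the total, overprovision/reserved segment counts must be sane, and
 * every current segment pointer (node and data) must lie inside the main area
 * with an in-range block offset.  Returns 1 if the image needs fsck.
 */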
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	int i;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
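
/*
 * Cache frequently used geometry and tunables from the raw superblock into
 * the in-memory sb_info, and initialise the per-sb locks, counters and lists
 * used during mount.
 */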
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i, j;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	atomic_set(&sbi->wb_sync_req, 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
		for (j = HOT; j < NR_TEMP_TYPE; j++)
			mutex_init(&sbi->wio_mutex[i][j]);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);
}

static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}

#ifdef CONFIG_BLK_DEV_ZONED
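/*
 * For a zoned block device, record the zone size (in filesystem blocks) and
 * build a per-zone type table by walking the device with
 * blkdev_report_zones(), so that later allocation decisions can tell
 * conventional and sequential-write-required zones apart.
 */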
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES	4096

	zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					  zones, &nr_zones,
					  GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif

/*
 * Read the f2fs raw super block.
 * Because there are two copies of the super block, read both of them
 * to get the first valid one. If either copy is broken, pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Fail to read any one of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
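
/*
 * Write the in-memory superblock back to disk.  The backup copy is written
 * first; the primary (currently valid) copy is rewritten only when we are not
 * on the recovery path and the backup write succeeded.
 */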
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
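
/*
 * Build the device list for this superblock.  A plain single-device mount
 * needs no list; a single zoned device or a multi-device image gets an
 * f2fs_dev_info entry per device, with its block range and (for zoned
 * devices) its zone-type table filled in by init_blkz_info().
 */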
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
				GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_mounted_blkzoned(sbi->sb)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled\n");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
				i, FDEV(i).path,
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_msg(sbi->sb, KERN_INFO,
			"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
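
/*
 * Fill a VFS super_block for mount: read and validate the raw superblock,
 * parse options, set up in-memory state (meta/node inodes, checkpoint,
 * segment/node managers, device list), read the root inode, optionally turn
 * quotas on, replay fsynced data, and finally start background GC.  On
 * failure everything is torn down in reverse order via the labels at the
 * bottom; the whole sequence is retried once with SBI_NEED_FSCK set if
 * roll-forward recovery fails.
 */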
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sb))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						sizeof(raw_super->uuid));

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sb)) {
		f2fs_msg(sb, KERN_ERR,
			 "Zoned block device support is not enabled\n");
		err = -EOPNOTSUPP;
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	if (f2fs_sb_has_quota_ino(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
								GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_options;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_options;

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_options;
		}
	}

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;

#ifdef CONFIG_QUOTA
	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err) {
			f2fs_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", err);
			goto free_sysfs;
		}
	}
#endif
	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not taken by a clean shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_meta;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (!retry)
			goto skip_recovery;

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_meta;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_meta;
		}
	}
skip_recovery:
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only, start the GC thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_meta;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_meta:
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	f2fs_sync_inode_meta(sbi);
	/*
	 * Some dirty meta pages can be left behind when recover_orphan_inodes()
	 * fails with EIO.  Then iput(node_inode) can trigger balance_fs_bg()
	 * followed by write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
#ifdef CONFIG_QUOTA
free_sysfs:
#endif
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	mutex_lock(&sbi->umount_mutex);
	release_ino_entry(sbi, true);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
	f2fs_destroy_stats(sbi);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_options:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	destroy_percpu_info(sbi);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
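
/*
 * On unmount, if the mount actually completed (sb->s_root is set), stop the
 * background GC and discard threads before the generic block-super teardown,
 * so they cannot touch the filesystem while it is being torn down.
 */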
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
		stop_gc_thread(F2FS_SB(sb));
		stop_discard_thread(F2FS_SB(sb));
	}
	kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
	.owner = THIS_MODULE,
	.name = "f2fs",
	.mount = f2fs_mount,
	.kill_sb = kill_f2fs_super,
	.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
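
/*
 * Module init: set up the slab caches and sysfs entries, register the
 * shrinker and the filesystem type, then create the debugfs statistics root.
 * Each step is unwound in reverse order on failure, mirroring exit_f2fs_fs().
 */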
static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_init_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");