/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;

#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info f2fs_fault;

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
};

static void f2fs_build_fault_attr(unsigned int rate)
{
	if (rate) {
		atomic_set(&f2fs_fault.inject_ops, 0);
		f2fs_fault.inject_rate = rate;
		f2fs_fault.inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(&f2fs_fault, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
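
/*
 * For illustration: mounting with "-o fault_injection=10" calls
 * f2fs_build_fault_attr(10) above, arming every fault type and
 * injecting roughly one failure per ten instrumented operations
 * (the check itself lives in time_to_inject() in f2fs.h).
 */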

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_mode,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_err, NULL},
};
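
/*
 * A mount string such as "background_gc=sync,discard,active_logs=6"
 * is split on commas in parse_options() below; match_token() matches
 * each piece against this table and captures any %s/%u argument into
 * the args[] substrings.
 */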

/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	FAULT_INFO_RATE,	/* struct f2fs_fault_info */
	FAULT_INFO_TYPE,	/* struct f2fs_fault_info */
#endif
};

struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	int offset;
};

static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	else if (struct_type == FAULT_INFO_RATE ||
					struct_type == FAULT_INFO_TYPE)
		return (unsigned char *)&f2fs_fault;
#endif
	return NULL;
}

static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)(sbi->kbytes_written +
			BD_PART_WRITTEN(sbi)));
}

static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}

static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
{
	unsigned char *ptr;
	unsigned long t;
	unsigned int *ui;
	ssize_t ret;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret < 0)
		return ret;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
		return -EINVAL;
#endif
	*ui = t;
	return count;
}
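
/*
 * Typical usage from user space (the device path is illustrative):
 *   cat /sys/fs/f2fs/sda1/ipu_policy
 *   echo 2 > /sys/fs/f2fs/sda1/ipu_policy
 * Reads and writes are routed through f2fs_attr_show()/f2fs_attr_store()
 * below into the two helpers above.
 */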

static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}

static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}

static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}

#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
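
/*
 * For illustration, F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread,
 * gc_min_sleep_time, min_sleep_time) defines a 0644 attribute named
 * "gc_min_sleep_time" whose show/store access the min_sleep_time
 * field at offsetof(struct f2fs_gc_kthread, min_sleep_time).
 */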

F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);

#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(gc_idle),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
	ATTR_LIST(lifetime_write_kbytes),
	NULL,
};

static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
/* sysfs for f2fs fault injection */
static struct kobject f2fs_fault_inject;

static struct attribute *f2fs_fault_attrs[] = {
	ATTR_LIST(inject_rate),
	ATTR_LIST(inject_type),
	NULL
};

static struct kobj_type f2fs_fault_ktype = {
	.default_attrs	= f2fs_fault_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
};
#endif

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
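
/*
 * Usage example: f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
 * the level string is prepended verbatim, so the log line reads
 * "F2FS-fs (<dev>): unable to set blocksize" at KERN_ERR level.
 */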

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(0);
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(arg);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= MS_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~MS_LAZYTIME;
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}
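
/*
 * Example: for data = "background_gc=off,inline_data", the loop above
 * clears BG_GC and FORCE_FG_GC for the first token and sets INLINE_DATA
 * for the second; any unrecognized token fails the whole mount with
 * -EINVAL.
 */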

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	if (percpu_counter_init(&fi->dirty_pages, 0, GFP_NOFS)) {
		kmem_cache_free(f2fs_inode_cachep, fi);
		return NULL;
	}

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid a simultaneous evict_inode call */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* fi->extent_tree should be kept for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		return 0;
	}

	return generic_drop_inode(inode);
}

int f2fs_inode_dirtied(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return 1;
	}

	set_inode_flag(inode, FI_DIRTY_INODE);
	list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
	inc_page_count(sbi, F2FS_DIRTY_IMETA);
	stat_inc_dirty_inode(sbi, DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	return 0;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	list_del_init(&F2FS_I(inode)->gdirty_list);
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	dec_page_count(sbi, F2FS_DIRTY_IMETA);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode);
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	percpu_counter_destroy(&F2FS_I(inode)->dirty_pages);
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		percpu_counter_destroy(&sbi->nr_pages[i]);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);
	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi, true);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_bios(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_percpu_info(sbi);
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
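
/*
 * Note: f2fs_sync_fs(sb, 1) is the heavyweight path; it serializes
 * against GC via gc_mutex and writes a full checkpoint, which is what
 * makes sync(2) and freeze durable on f2fs.
 */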

static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (f2fs_readonly(sb))
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = buf->f_files - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
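
/*
 * In df(1) terms: f_blocks counts every block past segment 0, f_bfree
 * additionally credits the over-provisioned (ovp) blocks back, and
 * f_bavail is what user writes can actually consume.
 */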

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");
	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}

static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
			get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}

static int segment_bits_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i, j;

	seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u|", se->type,
			get_valid_blocks(sbi, i, 1));
		for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
			seq_printf(seq, "%x ", se->cur_valid_map[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}

#define F2FS_PROC_FILE_DEF(_name)					\
static int _name##_open_fs(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, _name##_seq_show, PDE_DATA(inode));	\
}									\
									\
static const struct file_operations f2fs_seq_##_name##_fops = {		\
	.open = _name##_open_fs,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
	.release = single_release,					\
};

F2FS_PROC_FILE_DEF(segment_info);
F2FS_PROC_FILE_DEF(segment_bits);
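
/*
 * The two instantiations above generate f2fs_seq_segment_info_fops and
 * f2fs_seq_segment_bits_fops, which are hooked up at mount time under
 * /proc/fs/f2fs/<dev>/segment_info and .../segment_bits.
 */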

static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, EXTENT_CACHE);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_hmsmr(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/* disallow enabling/disabling extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue_flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	return err;
}

static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_key_prefix(struct inode *inode, u8 **key)
{
	*key = F2FS_I_SB(inode)->key_prefix;
	return F2FS_I_SB(inode)->key_prefix_size;
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static struct fscrypt_operations f2fs_cryptops = {
	.get_context	= f2fs_get_context,
	.key_prefix	= f2fs_key_prefix,
	.set_context	= f2fs_set_context,
	.is_encrypted	= f2fs_encrypted_inode,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted	= f2fs_encrypted_inode,
};
#endif

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};

static loff_t max_file_blocks(void)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
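
/*
 * Worked example, assuming the usual 4KB-block constants
 * (DEF_ADDRS_PER_INODE = 923, F2FS_INLINE_XATTR_ADDRS = 50,
 * ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018):
 *   873 + 2*1018 + 2*1018^2 + 1018^3 = 1,057,053,389 blocks,
 * i.e. a maximum file size of roughly 3.94 TB.
 */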

static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can do FUA all the time */
	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
}

static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
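
/*
 * In short, the on-disk layout must be contiguous:
 *   SB | CP | SIT | NAT | SSA | MAIN
 * with each region starting exactly where the previous one ends.
 * Returning true fails the mount, except in the shrink case above
 * where the superblock could be fixed up in place.
 */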

static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)\n",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}

int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	mutex_init(&sbi->wio_mutex[NODE]);
	mutex_init(&sbi->wio_mutex[DATA]);

#ifdef CONFIG_F2FS_FS_ENCRYPTION
	memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
				F2FS_KEY_DESC_PREFIX_SIZE);
	sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
#endif
}

static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int i, err;

	for (i = 0; i < NR_COUNT_TYPE; i++) {
		err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
		if (err)
			return err;
	}

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}

/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either is broken, pass a recovery
 * flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* failed to read either of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* no valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	return err;
}
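
/*
 * Core mount routine: reads and validates the super block, sets up
 * the VFS super_block fields, builds the segment/node/GC managers,
 * performs roll-forward recovery of fsynced data, and finally starts
 * the background GC thread.  On failure the whole sequence is retried
 * exactly once (see the "retry" flag below), with SBI_NEED_FSCK set
 * if the first attempt failed during fsync-data recovery.
 */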
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_options;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan inodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc) {
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);
		proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_bits_fops, sb);
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount must fail when the device is read-only and
		 * the previous checkpoint was not completed by a clean
		 * system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_kobj;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_kobj;
		}
	}

	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only, start the
	 * background GC thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run the background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;
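
	/*
	 * Error unwinding: each label below releases the resources
	 * acquired up to the corresponding point in the setup sequence
	 * above, in reverse order of acquisition.
	 */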
free_kobj:
	f2fs_sync_inode_meta(sbi);
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	destroy_percpu_info(sbi);
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
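
/* Thin wrapper: f2fs mounts on a block device via mount_bdev(). */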
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
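
/*
 * Flag the superblock as closing before the generic block-super
 * teardown runs, so later teardown paths can tell that a real
 * unmount is in progress.
 */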
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}
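
/* f2fs always needs a backing block device, hence FS_REQUIRES_DEV. */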
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
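
/*
 * Slab cache for the in-memory f2fs inode info.  SLAB_ACCOUNT makes
 * the allocations chargeable to kernel memory accounting (cgroups).
 */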
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
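
/*
 * Module init: create the slab caches and sysfs objects first, then
 * register the shrinker and, last of all, the filesystem type itself,
 * so no mount can race against partially initialized caches.  The
 * error ladder below undoes each step in reverse order.
 */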
static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_fault_inject.kset = f2fs_kset;
	f2fs_build_fault_attr(0);
	err = kobject_init_and_add(&f2fs_fault_inject, &f2fs_fault_ktype,
						NULL, "fault_injection");
	if (err) {
		f2fs_fault_inject.kset = NULL;
		goto free_kset;
	}
#endif
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_kset;

	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_kset:
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (f2fs_fault_inject.kset)
		kobject_put(&f2fs_fault_inject);
#endif
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
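
/*
 * Module exit: tear everything down in the reverse order of
 * init_f2fs_fs(), unregistering the filesystem before destroying the
 * caches it depends on.
 */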
static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	kobject_put(&f2fs_fault_inject);
#endif
	kset_unregister(f2fs_kset);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");