/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle:
 *    - mount structures
 *    - super-block tables
 *    - filesystem drivers list
 *    - mount system call
 *    - umount system call
 *    - ustat system call
 *
 *  GK 2/5/95 - Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>            /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include "internal.h"

LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
        "sb_writers",
        "sb_pagefaults",
        "sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        struct super_block *sb;
        long fs_objects = 0;
        long total_objects;
        long freed = 0;
        long dentries;
        long inodes;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Deadlock avoidance. We may hold various FS locks, and we don't want
         * to recurse into the FS that called us in clear_inode() and friends..
         */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!grab_super_passive(sb))
                return SHRINK_STOP;

        if (sb->s_op->nr_cached_objects)
                fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);

        inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
        dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
        total_objects = dentries + inodes + fs_objects + 1;

        /* proportion the scan between the caches */
        dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
        inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);

        /*
         * prune the dcache first as the icache is pinned by it, then
         * prune the icache, followed by the filesystem specific caches
         */
        freed = prune_dcache_sb(sb, dentries, sc->nid);
        freed += prune_icache_sb(sb, inodes, sc->nid);

        if (fs_objects) {
                fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
                                       total_objects);
                freed += sb->s_op->free_cached_objects(sb, fs_objects,
                                                       sc->nid);
        }

        drop_super(sb);
        return freed;
}
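
/*
 * Illustrative worked example (editor's note, not in the original file):
 * how the mult_frac() proportioning above splits a scan. With
 * sc->nr_to_scan = 128 and, say, dentries = 600, inodes = 300,
 * fs_objects = 100, total_objects is 1001 (the +1 avoids division by
 * zero), so roughly:
 *
 *      dentries scanned   = 128 * 600 / 1001 ~= 76
 *      inodes scanned     = 128 * 300 / 1001 ~= 38
 *      fs objects scanned = 128 * 100 / 1001 ~= 12
 *
 * i.e. each cache is shrunk in proportion to its share of the objects.
 */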
static unsigned long super_cache_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct super_block *sb;
        long total_objects = 0;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Don't call grab_super_passive as it is a potential
         * scalability bottleneck. The counts could get updated
         * between super_cache_count and super_cache_scan anyway.
         * Call to super_cache_count with shrinker_rwsem held
         * ensures the safety of call to list_lru_count_node() and
         * s_op->nr_cached_objects().
         */
        if (sb->s_op && sb->s_op->nr_cached_objects)
                total_objects = sb->s_op->nr_cached_objects(sb,
                                                            sc->nid);

        total_objects += list_lru_count_node(&sb->s_dentry_lru,
                                             sc->nid);
        total_objects += list_lru_count_node(&sb->s_inode_lru,
                                             sc->nid);

        total_objects = vfs_pressure_ratio(total_objects);
        return total_objects;
}

/**
 * destroy_super - frees a superblock
 * @s: superblock to free
 *
 * Frees a superblock.
 */
static void destroy_super(struct super_block *s)
{
        int i;

        list_lru_destroy(&s->s_dentry_lru);
        list_lru_destroy(&s->s_inode_lru);
        for (i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_counter_destroy(&s->s_writers.counter[i]);
        security_sb_free(s);
        WARN_ON(!list_empty(&s->s_mounts));
        kfree(s->s_subtype);
        kfree(s->s_options);
        kfree_rcu(s, rcu);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to a new superblock, or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
        struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
        static const struct super_operations default_op;
        int i;

        if (!s)
                return NULL;

        INIT_LIST_HEAD(&s->s_mounts);

        if (security_sb_alloc(s))
                goto fail;

        for (i = 0; i < SB_FREEZE_LEVELS; i++) {
                if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0)
                        goto fail;
                lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
                                 &type->s_writers_key[i], 0);
        }
        init_waitqueue_head(&s->s_writers.wait);
        init_waitqueue_head(&s->s_writers.wait_unfrozen);
        s->s_flags = flags;
        s->s_bdi = &default_backing_dev_info;
        INIT_HLIST_NODE(&s->s_instances);
        INIT_HLIST_BL_HEAD(&s->s_anon);
        INIT_LIST_HEAD(&s->s_inodes);

        if (list_lru_init(&s->s_dentry_lru))
                goto fail;
        if (list_lru_init(&s->s_inode_lru))
                goto fail;

        init_rwsem(&s->s_umount);
        lockdep_set_class(&s->s_umount, &type->s_umount_key);
        /*
         * sget() can have s_umount recursion.
         *
         * When it cannot find a suitable sb, it allocates a new
         * one (this one), and tries again to find a suitable old
         * one.
         *
         * In case that succeeds, it will acquire the s_umount
         * lock of the old one. Since these are clearly distinct
         * locks, and this object isn't exposed yet, there's no
         * risk of deadlocks.
         *
         * Annotate this by putting this lock in a different
         * subclass.
         */
        down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
        lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
        mutex_init(&s->s_dquot.dqio_mutex);
        mutex_init(&s->s_dquot.dqonoff_mutex);
        s->s_maxbytes = MAX_NON_LFS;
        s->s_op = &default_op;
        s->s_time_gran = 1000000000;
        s->cleancache_poolid = -1;

        s->s_shrink.seeks = DEFAULT_SEEKS;
        s->s_shrink.scan_objects = super_cache_scan;
        s->s_shrink.count_objects = super_cache_count;
        s->s_shrink.batch = 1024;
        s->s_shrink.flags = SHRINKER_NUMA_AWARE;
        return s;

fail:
        destroy_super(s);
        return NULL;
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount. The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
        if (!--sb->s_count) {
                list_del_init(&sb->s_list);
                destroy_super(sb);
        }
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there are no
 * references left.
 */
static void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;

        if (atomic_dec_and_test(&s->s_active)) {
                cleancache_invalidate_fs(s);
                unregister_shrinker(&s->s_shrink);
                fs->kill_sb(s);

                put_filesystem(fs);
                put_super(s);
        } else {
                up_write(&s->s_umount);
        }
}
EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}
EXPORT_SYMBOL(deactivate_super);
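
/*
 * Illustrative summary (editor's note, not in the original file): a
 * superblock carries two counts. s_count is a passive reference ("don't
 * free the struct"), taken by get_super()/grab_super_passive() and dropped
 * via put_super() or drop_super(). s_active is an active reference ("keep
 * the fs alive"), taken by grab_super()/sget() and dropped via
 * deactivate_super() or deactivate_locked_super(). A typical pairing looks
 * roughly like:
 *
 *      sb = sget(type, test, set, flags, data);  // active ref, s_umount held
 *      ...
 *      deactivate_locked_super(sb);              // e.g. on a failed mount
 */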
/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference. grab_super() is used when we
 * have just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference. grab_super()
 * is called with sb_lock held and drops it. Returns 1 in case of
 * success, 0 on failure (the superblock was already dead or dying
 * when grab_super() was called). Note that this is only
 * called for superblocks not in rundown mode (== ones still on ->fs_supers
 * of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        s->s_count++;
        spin_unlock(&sb_lock);
        down_write(&s->s_umount);
        if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
                put_super(s);
                return 1;
        }
        up_write(&s->s_umount);
        put_super(s);
        return 0;
}

/*
 * grab_super_passive - acquire a passive reference
 * @sb: reference we are trying to grab
 *
 * Tries to acquire a passive reference. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * superblock does not go away while we are working on it. It returns
 * false if a reference was not gained, and returns true with the s_umount
 * lock held in read mode if a reference is gained. On successful return,
 * the caller must drop the s_umount lock and the passive reference when
 * done.
 */
bool grab_super_passive(struct super_block *sb)
{
        spin_lock(&sb_lock);
        if (hlist_unhashed(&sb->s_instances)) {
                spin_unlock(&sb_lock);
                return false;
        }

        sb->s_count++;
        spin_unlock(&sb_lock);

        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }

        put_super(sb);
        return false;
}

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                sb->s_flags &= ~MS_ACTIVE;

                fsnotify_unmount_inodes(&sb->s_inodes);

                evict_inodes(sb);

                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
                        sb->s_dio_done_wq = NULL;
                }

                if (sop->put_super)
                        sop->put_super(sb);

                if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                               "Self-destruct in 5 seconds. Have a nice day...\n",
                               sb->s_id);
                }
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
}
EXPORT_SYMBOL(generic_shutdown_super);

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                         int (*test)(struct super_block *, void *),
                         int (*set)(struct super_block *, void *),
                         int flags,
                         void *data)
{
        struct super_block *s = NULL;
        struct super_block *old;
        int err;

retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (!grab_super(old))
                                goto retry;
                        if (s) {
                                up_write(&s->s_umount);
                                destroy_super(s);
                                s = NULL;
                        }
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type, flags);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                up_write(&s->s_umount);
                destroy_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strlcpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        register_shrinker(&s->s_shrink);
        return s;
}
EXPORT_SYMBOL(sget);
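
/*
 * Illustrative sketch (editor's note, not in the original file): a ->mount()
 * helper uses sget() with a test/set pair so one superblock is shared per
 * backing store. set_bdev_super()/test_bdev_super() and
 * ns_set_super()/ns_test_super() later in this file are the canonical pairs;
 * a caller looks roughly like:
 *
 *      s = sget(fs_type, test_bdev_super, set_bdev_super, flags, bdev);
 *      if (IS_ERR(s))
 *              return ERR_CAST(s);
 *      if (s->s_root)
 *              ... matched an existing superblock, reuse it ...
 *      else
 *              ... fresh sb: fill it and set MS_ACTIVE ...
 */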
void drop_super(struct super_block *sb)
{
        up_read(&sb->s_umount);
        put_super(sb);
}
EXPORT_SYMBOL(drop_super);

/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
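
/*
 * Illustrative sketch (editor's note, not in the original file): sys_sync()
 * in fs/sync.c walks every superblock this way, roughly:
 *
 *      static void sync_inodes_one_sb(struct super_block *sb, void *arg)
 *      {
 *              if (!(sb->s_flags & MS_RDONLY))
 *                      sync_inodes_sb(sb);
 *      }
 *
 *      iterate_supers(sync_inodes_one_sb, NULL);
 *
 * The callback runs with s_umount held in read mode, per the locking rules
 * above.
 */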
/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
                         void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
EXPORT_SYMBOL(iterate_supers_type);

/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}
EXPORT_SYMBOL(get_super);
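
/*
 * Illustrative sketch (editor's note, not in the original file): fsync_bdev()
 * in fs/block_dev.c is a typical caller, pairing get_super() with
 * drop_super(), roughly:
 *
 *      struct super_block *sb = get_super(bdev);
 *      if (sb) {
 *              int res = sync_filesystem(sb);
 *              drop_super(sb);
 *              return res;
 *      }
 *      return sync_blockdev(bdev);
 */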
/**
 * get_super_thawed - get thawed superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device. The superblock is returned once it is thawed
 * (or immediately if it was not frozen). %NULL is returned if no match
 * is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
        while (1) {
                struct super_block *s = get_super(bdev);
                if (!s || s->s_writers.frozen == SB_UNFROZEN)
                        return s;
                up_read(&s->s_umount);
                wait_event(s->s_writers.wait_unfrozen,
                           s->s_writers.frozen == SB_UNFROZEN);
                put_super(s);
        }
}
EXPORT_SYMBOL(get_super_thawed);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        if (!grab_super(sb))
                                goto restart;
                        up_write(&sb->s_umount);
                        return sb;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
 * @flags: numeric part of options
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
        int retval;
        int remount_ro;

        if (sb->s_writers.frozen != SB_UNFROZEN)
                return -EBUSY;

#ifdef CONFIG_BLOCK
        if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
                return -EACCES;
#endif

        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

        if (remount_ro) {
                if (sb->s_pins.first) {
                        up_write(&sb->s_umount);
                        sb_pin_kill(sb);
                        down_write(&sb->s_umount);
                        if (!sb->s_root)
                                return 0;
                        if (sb->s_writers.frozen != SB_UNFROZEN)
                                return -EBUSY;
                        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
                }
        }
        shrink_dcache_sb(sb);

        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
                if (force) {
                        sb->s_readonly_remount = 1;
                        smp_wmb();
                } else {
                        retval = sb_prepare_remount_readonly(sb);
                        if (retval)
                                return retval;
                }
        }

        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
                if (retval) {
                        if (!force)
                                goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
                }
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
        /* Needs to be ordered wrt mnt_is_readonly() */
        smp_wmb();
        sb->s_readonly_remount = 0;

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (eg. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we try to mount readonly then copy the filesystem
         * from bdev, we could get stale data, so invalidate it to give a best
         * effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;

cancel_readonly:
        sb->s_readonly_remount = 0;
        return retval;
}
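
/*
 * Illustrative note (editor's note, not in the original file): typical
 * callers include do_remount() in fs/namespace.c, reached from e.g.
 *
 *      mount -o remount,ro /mnt  ->  do_remount_sb(sb, MS_RDONLY, data, 0)
 *
 * plus mount_single() below (remounting a shared sb) and
 * do_emergency_remount() below, which forces every block-backed fs
 * read-only via do_remount_sb(sb, MS_RDONLY, NULL, 1).
 */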
static void do_emergency_remount(struct work_struct *work)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
                    !(sb->s_flags & MS_RDONLY)) {
                        /*
                         * What lock protects sb->s_flags??
                         */
                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
                }
                up_write(&sb->s_umount);
                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
        kfree(work);
        printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices. -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock); /* protects the above */
/* Many userspace utilities consider an FSID of 0 invalid.
 * Always return at least 1 from get_anon_bdev.
 */
static int unnamed_dev_start = 1;

int get_anon_bdev(dev_t *p)
{
        int dev;
        int error;

retry:
        if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                return -ENOMEM;
        spin_lock(&unnamed_dev_lock);
        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
        if (!error)
                unnamed_dev_start = dev + 1;
        spin_unlock(&unnamed_dev_lock);
        if (error == -EAGAIN)
                /* We raced and lost with another CPU. */
                goto retry;
        else if (error)
                return -EAGAIN;

        if (dev == (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
                        unnamed_dev_start = dev;
                spin_unlock(&unnamed_dev_lock);
                return -EMFILE;
        }
        *p = MKDEV(0, dev & MINORMASK);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
        int slot = MINOR(dev);

        spin_lock(&unnamed_dev_lock);
        ida_remove(&unnamed_dev_ida, slot);
        if (slot < unnamed_dev_start)
                unnamed_dev_start = slot;
        spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
        int error = get_anon_bdev(&s->s_dev);
        if (!error)
                s->s_bdi = &noop_backing_dev_info;
        return error;
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;

        generic_shutdown_super(sb);
        free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
        return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
        sb->s_fs_info = data;
        return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
        void *data, int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *sb;

        sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
        if (IS_ERR(sb))
                return ERR_CAST(sb);

        if (!sb->s_root) {
                int err;
                err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
                if (err) {
                        deactivate_locked_super(sb);
                        return ERR_PTR(err);
                }

                sb->s_flags |= MS_ACTIVE;
        }

        return dget(sb->s_root);
}
EXPORT_SYMBOL(mount_ns);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
        return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error = 0;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
                 bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        if (IS_ERR(s))
                goto error_s;

        if (s->s_root) {
                if ((flags ^ s->s_flags) & MS_RDONLY) {
                        deactivate_locked_super(s);
                        error = -EBUSY;
                        goto error_bdev;
                }

                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device(). blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount. Drop
                 * s_umount temporarily. This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                char b[BDEVNAME_SIZE];

                s->s_mode = mode;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        goto error;
                }

                s->s_flags |= MS_ACTIVE;
                bdev->bd_super = s;
        }

        return dget(s->s_root);

error_s:
        error = PTR_ERR(s);
error_bdev:
        blkdev_put(bdev, mode);
error:
        return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
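
/*
 * Illustrative sketch (editor's note, not in the original file): a disk-based
 * filesystem typically wires mount_bdev() and kill_block_super() into its
 * file_system_type. "examplefs" and examplefs_fill_super() are hypothetical:
 *
 *      static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *              int flags, const char *dev_name, void *data)
 *      {
 *              return mount_bdev(fs_type, flags, dev_name, data,
 *                                examplefs_fill_super);
 *      }
 *
 *      static struct file_system_type examplefs_fs_type = {
 *              .owner    = THIS_MODULE,
 *              .name     = "examplefs",
 *              .mount    = examplefs_mount,
 *              .kill_sb  = kill_block_super,
 *              .fs_flags = FS_REQUIRES_DEV,
 *      };
 */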
void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;
        fmode_t mode = sb->s_mode;

        bdev->bd_super = NULL;
        generic_shutdown_super(sb);
        sync_blockdev(bdev);
        WARN_ON_ONCE(!(mode & FMODE_EXCL));
        blkdev_put(bdev, mode | FMODE_EXCL);
}
EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        int error;
        struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

        if (IS_ERR(s))
                return ERR_CAST(s);

        error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
        if (error) {
                deactivate_locked_super(s);
                return ERR_PTR(error);
        }
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
        return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;

        s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
        if (!s->s_root) {
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }
                s->s_flags |= MS_ACTIVE;
        } else {
                do_remount_sb(s, flags, data, 0);
        }
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
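
/*
 * Illustrative note (editor's note, not in the original file): the three
 * helpers differ in how many superblocks a filesystem type gets.
 * mount_bdev() shares one sb per block device; mount_nodev() always
 * allocates a fresh sb (no test callback, so sget() never matches an old
 * one -- ramfs-style); mount_single() shares one sb for the whole type
 * (compare_single() matches everything -- debugfs-style) and remounts it
 * on subsequent mounts.
 */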
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct dentry *root;
        struct super_block *sb;
        char *secdata = NULL;
        int error = -ENOMEM;

        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
                        goto out;

                error = security_sb_copy_data(data, secdata);
                if (error)
                        goto out_free_secdata;
        }

        root = type->mount(type, flags, name, data);
        if (IS_ERR(root)) {
                error = PTR_ERR(root);
                goto out_free_secdata;
        }
        sb = root->d_sb;
        BUG_ON(!sb);
        WARN_ON(!sb->s_bdi);
        WARN_ON(sb->s_bdi == &default_backing_dev_info);
        sb->s_flags |= MS_BORN;

        error = security_sb_kern_mount(sb, flags, secdata);
        if (error)
                goto out_sb;

        /*
         * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
         * but s_maxbytes was an unsigned long long for many releases. Throw
         * this warning for a little while to try and catch filesystems that
         * violate this rule.
         */
        WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
                "negative value (%lld)\n", type->name, sb->s_maxbytes);

        up_write(&sb->s_umount);
        free_secdata(secdata);
        return root;
out_sb:
        dput(root);
        deactivate_locked_super(sb);
out_free_secdata:
        free_secdata(secdata);
out:
        return ERR_PTR(error);
}

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
        percpu_counter_dec(&sb->s_writers.counter[level-1]);
        /*
         * Make sure s_writers are updated before we wake up waiters in
         * freeze_super().
         */
        smp_mb();
        if (waitqueue_active(&sb->s_writers.wait))
                wake_up(&sb->s_writers.wait);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);

#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to properly instrument it. Getting a freeze protection
 * works as getting a read lock but there are subtle problems. XFS for example
 * gets freeze protection on internal level twice in some cases, which is OK
 * only because we already hold a freeze protection also on higher level. Due
 * to these cases we have to tell lockdep we are doing trylock when we
 * already hold a freeze protection for a higher freeze level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
                                unsigned long ip)
{
        int i;

        if (!trylock) {
                for (i = 0; i < level - 1; i++)
                        if (lock_is_held(&sb->s_writers.lock_map[i])) {
                                trylock = true;
                                break;
                        }
        }
        rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
        if (unlikely(sb->s_writers.frozen >= level)) {
                if (!wait)
                        return 0;
                wait_event(sb->s_writers.wait_unfrozen,
                           sb->s_writers.frozen < level);
        }

#ifdef CONFIG_LOCKDEP
        acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
        percpu_counter_inc(&sb->s_writers.counter[level-1]);
        /*
         * Make sure counter is updated before we check for frozen.
         * freeze_super() first sets frozen and then checks the counter.
         */
        smp_mb();
        if (unlikely(sb->s_writers.frozen >= level)) {
                __sb_end_write(sb, level);
                goto retry;
        }
        return 1;
}
EXPORT_SYMBOL(__sb_start_write);
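
/*
 * Illustrative sketch (editor's note, not in the original file): callers use
 * the wrappers from include/linux/fs.h, which map the three freeze levels
 * onto __sb_start_write()/__sb_end_write(). A write path takes freeze
 * protection roughly like:
 *
 *      sb_start_write(inode->i_sb);  // __sb_start_write(sb, SB_FREEZE_WRITE, true)
 *      ... dirty pages, start a transaction, etc ...
 *      sb_end_write(inode->i_sb);    // __sb_end_write(sb, SB_FREEZE_WRITE)
 *
 * sb_start_pagefault() and sb_start_intwrite() do the same at the
 * SB_FREEZE_PAGEFAULT and SB_FREEZE_FS levels.
 */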
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
        s64 writers;

        /*
         * We just cycle-through lockdep here so that it does not complain
         * about returning with lock to userspace
         */
        rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

        do {
                DEFINE_WAIT(wait);

                /*
                 * We use a barrier in prepare_to_wait() to separate setting
                 * of frozen and checking of the counter
                 */
                prepare_to_wait(&sb->s_writers.wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
                if (writers)
                        schedule();

                finish_wait(&sb->s_writers.wait, &wait);
        } while (writers);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs. Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_writers.frozen != SB_UNFROZEN) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (!(sb->s_flags & MS_BORN)) {
                up_write(&sb->s_umount);
                return 0;       /* sic - it's "nothing to do" */
        }

        if (sb->s_flags & MS_RDONLY) {
                /* Nothing to do really... */
                sb->s_writers.frozen = SB_FREEZE_COMPLETE;
                up_write(&sb->s_umount);
                return 0;
        }

        /* From now on, no new normal writers can start */
        sb->s_writers.frozen = SB_FREEZE_WRITE;
        smp_wmb();

        /* Release s_umount to preserve sb_start_write -> s_umount ordering */
        up_write(&sb->s_umount);

        sb_wait_write(sb, SB_FREEZE_WRITE);

        /* Now we go and block page faults... */
        down_write(&sb->s_umount);
        sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
        smp_wmb();

        sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

        /* All writers are done so after syncing there won't be dirty data */
        sync_filesystem(sb);

        /* Now wait for internal filesystem counter */
        sb->s_writers.frozen = SB_FREEZE_FS;
        smp_wmb();
        sb_wait_write(sb, SB_FREEZE_FS);

        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS: Filesystem freeze failed\n");
                        sb->s_writers.frozen = SB_UNFROZEN;
                        smp_wmb();
                        wake_up(&sb->s_writers.wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        /*
         * This is just for debugging purposes so that fs can warn if it
         * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
         */
        sb->s_writers.frozen = SB_FREEZE_COMPLETE;
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
        int error;

        down_write(&sb->s_umount);
        if (sb->s_writers.frozen == SB_UNFROZEN) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb->s_flags & MS_RDONLY)
                goto out;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS: Filesystem thaw failed\n");
                        up_write(&sb->s_umount);
                        return error;
                }
        }

out:
        sb->s_writers.frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_writers.wait_unfrozen);
        deactivate_locked_super(sb);
        return 0;
}
EXPORT_SYMBOL(thaw_super);
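
/*
 * Illustrative note (editor's note, not in the original file): userspace
 * reaches freeze_super()/thaw_super() through the FIFREEZE/FITHAW ioctls
 * (ioctl_fsfreeze()/ioctl_fsthaw() in fs/ioctl.c), while block-layer users
 * such as device-mapper go through freeze_bdev()/thaw_bdev() in
 * fs/block_dev.c. A backup tool's view, roughly:
 *
 *      ioctl(fd, FIFREEZE, 0);         // -> freeze_super(sb)
 *      ... snapshot or copy the device ...
 *      ioctl(fd, FITHAW, 0);           // -> thaw_super(sb)
 */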