/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Released under GPL v2.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

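/* return the first slave of @p, i.e. the head of its ->mnt_slave_list */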
static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

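/* return the next mount on the same ->mnt_slave list as @p */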
static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

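/*
 * Walk the peer group of @mnt and return the first peer that belongs
 * to namespace @ns and whose root is reachable from @root, or NULL if
 * no such peer exists.
 */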
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}

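/*
 * Detach @mnt from its peer group and make it a slave.  A peer with
 * the same root dentry is preferred as the new master; failing that,
 * any remaining peer is used, and failing that @mnt's old master.
 * Slaves of @mnt are transferred to the new master; if there is no
 * new master, they become masterless.
 */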
static int do_make_slave(struct mount *mnt)
{
	struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
	struct mount *slave_mnt;

	/*
	 * slave 'mnt' to a peer mount that has the
	 * same root dentry. If none is available then
	 * slave it to anything that is available.
	 */
	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
	       peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;

	if (peer_mnt == mnt) {
		peer_mnt = next_peer(mnt);
		if (peer_mnt == mnt)
			peer_mnt = NULL;
	}
	if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
	    list_empty(&mnt->mnt_share))
		mnt_release_group_id(mnt);

	list_del_init(&mnt->mnt_share);
	mnt->mnt_group_id = 0;

	if (peer_mnt)
		master = peer_mnt;

	if (master) {
		list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
			slave_mnt->mnt_master = master;
		list_move(&mnt->mnt_slave, &master->mnt_slave_list);
		list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
	} else {
		struct list_head *p = &mnt->mnt_slave_list;
		while (!list_empty(p)) {
			slave_mnt = list_first_entry(p,
					struct mount, mnt_slave);
			list_del_init(&slave_mnt->mnt_slave);
			slave_mnt->mnt_master = NULL;
		}
	}
	mnt->mnt_master = master;
	CLEAR_MNT_SHARED(mnt);
	return 0;
}

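/*
 * Typically reached from do_change_type() in fs/namespace.c when
 * userspace changes propagation with mount(2) and one of MS_SHARED,
 * MS_PRIVATE, MS_SLAVE or MS_UNBINDABLE.
 */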
/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

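/*
 * Return the first mount of the next peer group in the propagation
 * tree, descending into slave lists first, or NULL once the walk
 * arrives back at @origin's own group.
 */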
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next != &master->mnt_slave_list)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;

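/* true if @m1 and @m2 belong to the same (non-zero) peer group */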
static inline bool peers(struct mount *m1, struct mount *m2)
{
	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}

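/*
 * Create the copy of last_source that propagation attaches under @m,
 * hang it at mountpoint mp and advance the globals above as the walk
 * in propagate_mnt() progresses.  Returns 0 on success or the error
 * from copy_tree().
 */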
static int propagate_one(struct mount *m)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (peers(m, last_dest)) {
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
		bool done;
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p))
				break;
		}
		do {
			struct mount *parent = last_source->mnt_parent;
			if (last_source == first_source)
				break;
			done = parent->mnt_master == p;
			if (done && peers(n, parent))
				break;
			last_source = last_source->mnt_master;
		} while (!done);
		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}

	/* Notice when we are propagating across user namespaces */
	if (m->mnt_ns->user_ns != user_ns)
		type |= CL_UNPRIVILEGED;
	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	child->mnt.mnt_flags &= ~MNT_LOCKED;
	mnt_set_mountpoint(m, mp, child);
	last_dest = m;
	last_source = child;
	if (m->mnt_master != dest_master) {
		read_seqlock_excl(&mount_lock);
		SET_MNT_MARK(m->mnt_master);
		read_sequnlock_excl(&mount_lock);
	}
	hlist_add_head(&child->mnt_hash, list);
	return 0;
}

/*
 * Mount 'source_mnt' under the destination 'dest_mnt' at the
 * mountpoint 'dest_mp', and propagate that mount to all the peer
 * and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt.  Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list.
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list: list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		    struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n;
	int ret = 0;

	/*
	 * we don't want to bother passing tons of arguments to
	 * propagate_one(); everything is serialized by namespace_sem,
	 * so globals will do just fine.
	 */
	user_ns = current->nsproxy->mnt_ns->user_ns;
	last_dest = dest_mnt;
	first_source = source_mnt;
	last_source = source_mnt;
	mp = dest_mp;
	list = tree_list;
	dest_master = dest_mnt->mnt_master;

	/* all peers of dest_mnt, except dest_mnt itself */
	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
		ret = propagate_one(n);
		if (ret)
			goto out;
	}

	/* all slave groups */
	for (m = next_group(dest_mnt, dest_mnt); m;
			m = next_group(m, dest_mnt)) {
		/* everything in that slave group */
		n = m;
		do {
			ret = propagate_one(n);
			if (ret)
				goto out;
			n = next_peer(n);
		} while (n != m);
	}
out:
	read_seqlock_excl(&mount_lock);
	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master != dest_mnt->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	read_sequnlock_excl(&mount_lock);
	return ret;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child;
	struct mount *parent = mnt->mnt_parent;
	int ret = 0;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
		if (child && list_empty(&child->mnt_mounts) &&
		    (ret = do_refcount_check(child, 1)))
			break;
	}
	return ret;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

/*
 * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
 */
static void mark_umount_candidates(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		struct mount *child = __lookup_mnt_last(&m->mnt,
						mnt->mnt_mountpoint);
		if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
			SET_MNT_MARK(child);
		}
	}
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static void __propagate_umount(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		struct mount *child = __lookup_mnt_last(&m->mnt,
						mnt->mnt_mountpoint);
		/*
		 * umount the child only if the child has no children
		 * and the child is marked safe to unmount.
		 */
		if (!child || !IS_MNT_MARKED(child))
			continue;
		CLEAR_MNT_MARK(child);
		if (list_empty(&child->mnt_mounts)) {
			list_del_init(&child->mnt_child);
			child->mnt.mnt_flags |= MNT_UMOUNT;
			list_move_tail(&child->mnt_list, &mnt->mnt_list);
		}
	}
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;

	list_for_each_entry_reverse(mnt, list, mnt_list)
		mark_umount_candidates(mnt);

	list_for_each_entry(mnt, list, mnt_list)
		__propagate_umount(mnt);
	return 0;
}