@@ -778,6 +778,20 @@ static void attach_mnt(struct mount *mnt,
 	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
 }
 
+static void attach_shadowed(struct mount *mnt,
+			struct mount *parent,
+			struct mount *shadows)
+{
+	if (shadows) {
+		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
+		list_add(&mnt->mnt_child, &shadows->mnt_child);
+	} else {
+		hlist_add_head_rcu(&mnt->mnt_hash,
+				m_hash(&parent->mnt, mnt->mnt_mountpoint));
+		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	}
+}
+
 /*
  * vfsmount lock must be held for write
  */
@@ -796,12 +810,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
 
 	list_splice(&head, n->list.prev);
 
-	if (shadows)
-		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
-	else
-		hlist_add_head_rcu(&mnt->mnt_hash,
-				m_hash(&parent->mnt, mnt->mnt_mountpoint));
-	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	attach_shadowed(mnt, parent, shadows);
 	touch_mnt_namespace(n);
 }
 
@@ -1474,6 +1483,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 			continue;
 
 		for (s = r; s; s = next_mnt(s, r)) {
+			struct mount *t = NULL;
 			if (!(flag & CL_COPY_UNBINDABLE) &&
 			    IS_MNT_UNBINDABLE(s)) {
 				s = skip_mnt_tree(s);
@@ -1495,7 +1505,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 				goto out;
 			lock_mount_hash();
 			list_add_tail(&q->mnt_list, &res->mnt_list);
-			attach_mnt(q, parent, p->mnt_mp);
+			mnt_set_mountpoint(parent, p->mnt_mp, q);
+			if (!list_empty(&parent->mnt_mounts)) {
+				t = list_last_entry(&parent->mnt_mounts,
+					struct mount, mnt_child);
+				if (t->mnt_mp != p->mnt_mp)
+					t = NULL;
+			}
+			attach_shadowed(q, parent, t);
 			unlock_mount_hash();
 		}
 	}
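
Below is a small user-space sketch of the ordering attach_shadowed() establishes. It is an illustration, not part of the patch: struct fake_mount, dump() and the simplified list helpers are stand-ins for the kernel's <linux/list.h> primitives. list_add() links the new entry immediately after a given entry, so a copied mount ends up right next to the mount it shadows in the parent's ->mnt_mounts list; the non-shadow branch falls back to list_add_tail(), which appends at the end, as commit_tree() and copy_tree() did unconditionally before this change.

/*
 * User-space sketch, not kernel code: minimal stand-ins for the
 * <linux/list.h> helpers, showing the ordering attach_shadowed() relies on.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* link @new between @prev and @next */
static void __list_add(struct list_head *new, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* new entry right after @head (what attach_shadowed() does with @shadows) */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/* new entry at the tail of the list headed by @head (the non-shadow case) */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

static void list_del(struct list_head *e)
{
	e->next->prev = e->prev;
	e->prev->next = e->next;
}

/* mnt_child placed first so a list_head pointer can be cast back directly */
struct fake_mount {
	struct list_head mnt_child;
	const char *name;
};

static void dump(const char *what, struct list_head *mnt_mounts)
{
	struct list_head *p;

	printf("%-14s:", what);
	for (p = mnt_mounts->next; p != mnt_mounts; p = p->next)
		printf(" %s", ((struct fake_mount *)p)->name);
	printf("\n");
}

int main(void)
{
	struct list_head mnt_mounts;		/* parent's list of children */
	struct fake_mount a = { .name = "A" };
	struct fake_mount b = { .name = "B" };
	struct fake_mount c = { .name = "C(shadows A)" };

	INIT_LIST_HEAD(&mnt_mounts);
	list_add_tail(&a.mnt_child, &mnt_mounts);
	list_add_tail(&b.mnt_child, &mnt_mounts);

	list_add(&c.mnt_child, &a.mnt_child);	/* shadow path: next to A */
	dump("with shadows", &mnt_mounts);

	list_del(&c.mnt_child);
	list_add_tail(&c.mnt_child, &mnt_mounts);	/* old behaviour */
	dump("plain tail add", &mnt_mounts);
	return 0;
}

Compiled and run, this prints "A C(shadows A) B" for the shadow path and "A B C(shadows A)" for the plain tail add, which is the difference between keeping shadowed mounts together and scattering copies to the end of the parent's child list.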