@@ -1648,10 +1648,12 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 				   int flags, const char *unused_dev_name,
 				   void *data)
 {
+	struct cgroup_subsys *ss;
 	struct cgroup_root *root;
 	struct cgroup_sb_opts opts;
 	struct dentry *dentry;
 	int ret;
+	int i;
 	bool new_sb;
 
 	/*
@@ -1677,6 +1679,27 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 		goto out_unlock;
 	}
 
+	/*
+	 * Destruction of cgroup root is asynchronous, so subsystems may
+	 * still be dying after the previous unmount. Let's drain the
+	 * dying subsystems. We just need to ensure that the ones
+	 * unmounted previously finish dying and don't care about new ones
+	 * starting. Testing ref liveliness is good enough.
+	 */
+	for_each_subsys(ss, i) {
+		if (!(opts.subsys_mask & (1 << i)) ||
+		    ss->root == &cgrp_dfl_root)
+			continue;
+
+		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
+			mutex_unlock(&cgroup_mutex);
+			msleep(10);
+			ret = restart_syscall();
+			goto out_free;
+		}
+		cgroup_put(&ss->root->cgrp);
+	}
+
 	for_each_root(root) {
 		bool name_match = false;
 
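For readers outside the kernel tree, the following is a minimal, self-contained
userspace sketch of the drain-then-retry pattern the hunk above introduces. It
is an illustration only, not kernel code: struct obj, obj_tryget_live() and
obj_put() are hypothetical stand-ins for the cgroup root,
percpu_ref_tryget_live() and cgroup_put(); a plain C11 atomic replaces the
percpu ref, and usleep() plus a retry loop replaces msleep()/restart_syscall().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical refcounted object; atomic_long stands in for a percpu_ref. */
struct obj {
	atomic_long refcnt;	/* > 0 while live, 0 once dying */
};

/*
 * Take a reference only if the object is still live, mirroring the
 * semantics of percpu_ref_tryget_live(): once the count has reached
 * zero the object is dying and the tryget must fail, so the caller
 * backs off and retries instead of resurrecting a dying object.
 */
static bool obj_tryget_live(struct obj *o)
{
	long old = atomic_load(&o->refcnt);

	while (old > 0) {
		/* On failure, old is reloaded and the loop rechecks liveness. */
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

static void obj_put(struct obj *o)
{
	atomic_fetch_sub(&o->refcnt, 1);
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	/*
	 * The drain loop in cgroup_mount() does the same dance: if the
	 * previously unmounted root is still dying, drop the lock, sleep
	 * 10ms and restart the syscall. Here we simply sleep and retry.
	 */
	while (!obj_tryget_live(&o))
		usleep(10 * 1000);	/* the msleep(10) analogue */

	obj_put(&o);			/* we only tested liveliness */
	printf("object is live, safe to proceed\n");
	return 0;
}

Note the reference taken by the tryget is dropped immediately, exactly as the
patch does with cgroup_put(): the point is not to hold the old root alive but
to detect that it has finished dying before a new root reuses the subsystems.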