@@ -99,6 +99,12 @@ static DEFINE_MUTEX(cgroup_mutex);
 static DECLARE_RWSEM(css_set_rwsem);
 #endif
 
+/*
+ * Protects cgroup_idr so that IDs can be released without grabbing
+ * cgroup_mutex.
+ */
+static DEFINE_SPINLOCK(cgroup_idr_lock);
+
 /*
  * Protects cgroup_subsys->release_agent_path. Modifying it also requires
  * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
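
Why a new spinlock rather than piggybacking on cgroup_mutex: as the added comment says, IDs must be releasable without the mutex. The cgroup_put() hunk below is the immediate beneficiary, and since destruction work can run from contexts where taking a sleeping mutex is awkward or impossible, a spinlock with short critical sections is the safer primitive there.
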
@@ -190,6 +196,37 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
 			      bool is_add);
 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
 
+/* IDR wrappers which synchronize using cgroup_idr_lock */
+static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
+			    gfp_t gfp_mask)
+{
+	int ret;
+
+	idr_preload(gfp_mask);
+	spin_lock(&cgroup_idr_lock);
+	ret = idr_alloc(idr, ptr, start, end, gfp_mask);
+	spin_unlock(&cgroup_idr_lock);
+	idr_preload_end();
+	return ret;
+}
+
+static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
+{
+	void *ret;
+
+	spin_lock(&cgroup_idr_lock);
+	ret = idr_replace(idr, ptr, id);
+	spin_unlock(&cgroup_idr_lock);
+	return ret;
+}
+
+static void cgroup_idr_remove(struct idr *idr, int id)
+{
+	spin_lock(&cgroup_idr_lock);
+	idr_remove(idr, id);
+	spin_unlock(&cgroup_idr_lock);
+}
+
 /**
  * cgroup_css - obtain a cgroup's css for the specified subsystem
  * @cgrp: the cgroup of interest
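
The alloc wrapper brackets the locked idr_alloc() with idr_preload()/idr_preload_end(), which fill and release a per-CPU preallocation pool (and disable preemption in between) so the allocation under the spinlock can be satisfied without calling into the allocator. Note the interaction with the call sites below, which switch from GFP_KERNEL to GFP_NOWAIT: idr_alloc() must not sleep while cgroup_idr_lock is held, but preloading with GFP_NOWAIT cannot block either, so the preload gives only a weak guarantee. A variant that keeps sleepable preloading while never sleeping under the lock might look like this (a sketch on my part, not part of this patch; __GFP_WAIT is the blocking flag of this kernel era):

static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	/* may sleep here if gfp_mask allows, filling the per-CPU pool */
	idr_preload(gfp_mask);
	spin_lock(&cgroup_idr_lock);
	/* strip the blocking flag: never sleep under a spinlock */
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_WAIT);
	spin_unlock(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

Callers could then keep passing GFP_KERNEL and still benefit from the preload.
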
@@ -1058,9 +1095,7 @@ static void cgroup_put(struct cgroup *cgrp)
 	 * per-subsystem and moved to css->id so that lookups are
 	 * successful until the target css is released.
 	 */
-	mutex_lock(&cgroup_mutex);
-	idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
-	mutex_unlock(&cgroup_mutex);
+	cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
 	cgrp->id = -1;
 
 	call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
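
With the wrapper in place, the lock/remove/unlock triplet collapses into one call, and ID release no longer touches cgroup_mutex at all — which is the point of the patch: this release path can now run from contexts where the mutex is unavailable.
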
@@ -1531,7 +1566,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
 	lockdep_assert_held(&cgroup_tree_mutex);
 	lockdep_assert_held(&cgroup_mutex);
 
-	ret = idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
+	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
 	if (ret < 0)
 		goto out;
 	root_cgrp->id = ret;
@@ -4225,7 +4260,7 @@ static long cgroup_create(struct cgroup *parent, const char *name,
 	 * Temporarily set the pointer to NULL, so idr_find() won't return
 	 * a half-baked cgroup.
 	 */
-	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
+	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
 	if (cgrp->id < 0) {
 		err = -ENOMEM;
 		goto err_unlock;
@@ -4268,7 +4303,7 @@ static long cgroup_create(struct cgroup *parent, const char *name,
 	 * @cgrp is now fully operational. If something fails after this
 	 * point, it'll be released via the normal destruction path.
 	 */
-	idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
+	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
 
 	err = cgroup_kn_set_ugid(kn);
 	if (err)
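
Together with the allocation hunk above, this forms a reserve-then-publish pattern: the ID slot is first claimed with a NULL pointer so a concurrent idr_find() (e.g. via css_from_id() below) sees nothing rather than a half-initialized cgroup, and idr_replace() flips the slot to the live pointer only once setup is done. Condensed, the pattern is (a sketch, error handling elided):

	/* reserve: concurrent idr_find(id) returns NULL, not garbage */
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
	/* ... fully initialize cgrp ... */
	/* publish: lookups now observe the live cgroup */
	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

If initialization fails between the two steps, the err_free_id label in the next hunk releases the reserved slot.
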
@@ -4302,7 +4337,7 @@ static long cgroup_create(struct cgroup *parent, const char *name,
 	return 0;
 
 err_free_id:
-	idr_remove(&root->cgroup_idr, cgrp->id);
+	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 err_unlock:
 	mutex_unlock(&cgroup_mutex);
 err_unlock_tree:
@@ -5162,7 +5197,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
 {
 	struct cgroup *cgrp;
 
-	cgroup_assert_mutexes_or_rcu_locked();
+	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	cgrp = idr_find(&ss->root->cgroup_idr, id);
 	if (cgrp)
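
The relaxed assertion captures the new read-side rule: writers now serialize on cgroup_idr_lock and the cgroup itself is freed through call_rcu() (see the cgroup_put() hunk), so RCU alone makes the idr_find() safe, and holding cgroup_mutex is no longer one of the documented options. A hypothetical caller would pin the result before leaving the read-side section, roughly (a sketch; css_tryget() is assumed to be this era's pinning primitive):

	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = css_from_id(id, ss);
	/* grab a reference before the RCU read section ends */
	if (css && !css_tryget(css))
		css = NULL;
	rcu_read_unlock();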