@@ -98,6 +98,14 @@ static DEFINE_MUTEX(cgroup_root_mutex);
 			   lockdep_is_held(&cgroup_mutex),		\
 			   "cgroup_mutex or RCU read lock required");
 
+#ifdef CONFIG_LOCKDEP
+#define cgroup_assert_mutex_or_root_locked()				\
+	WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&cgroup_mutex) &&	\
+				     !lockdep_is_held(&cgroup_root_mutex)))
+#else
+#define cgroup_assert_mutex_or_root_locked()	do { } while (0)
+#endif
+
 /*
  * cgroup destruction makes heavy use of work items and there can be a lot
  * of concurrent destructions. Use a separate workqueue so that cgroup
@@ -237,14 +245,15 @@ static int notify_on_release(const struct cgroup *cgrp)
 /**
  * for_each_subsys - iterate all loaded cgroup subsystems
  * @ss: the iteration cursor
- * @i: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
+ * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
  *
- * Should be called under cgroup_mutex.
+ * Iterates through all loaded subsystems. Should be called under
+ * cgroup_mutex or cgroup_root_mutex.
  */
-#define for_each_subsys(ss, i)						\
-	for ((i) = 0; (i) < CGROUP_SUBSYS_COUNT; (i)++)			\
-		if (({ lockdep_assert_held(&cgroup_mutex);		\
-		       !((ss) = cgroup_subsys[i]); })) { }		\
+#define for_each_subsys(ss, ssid)					\
+	for (({ cgroup_assert_mutex_or_root_locked(); (ssid) = 0; });	\
+	     (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)			\
+		if (!((ss) = cgroup_subsys[(ssid)])) { }		\
 		else
 
 /**
@@ -4592,6 +4601,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 	cgroup_init_cftsets(ss);
 
 	mutex_lock(&cgroup_mutex);
+	mutex_lock(&cgroup_root_mutex);
 	cgroup_subsys[ss->subsys_id] = ss;
 
 	/*
@@ -4641,10 +4651,12 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 		goto err_unload;
 
 	/* success! */
+	mutex_unlock(&cgroup_root_mutex);
 	mutex_unlock(&cgroup_mutex);
 	return 0;
 
 err_unload:
+	mutex_unlock(&cgroup_root_mutex);
 	mutex_unlock(&cgroup_mutex);
 	/* @ss can't be mounted here as try_module_get() would fail */
 	cgroup_unload_subsys(ss);
@@ -4674,6 +4686,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 	BUG_ON(ss->root != &cgroup_dummy_root);
 
 	mutex_lock(&cgroup_mutex);
+	mutex_lock(&cgroup_root_mutex);
 
 	offline_css(cgroup_css(cgroup_dummy_top, ss));
 
@@ -4708,6 +4721,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 	ss->css_free(cgroup_css(cgroup_dummy_top, ss));
 	RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
 
+	mutex_unlock(&cgroup_root_mutex);
 	mutex_unlock(&cgroup_mutex);
 }
 EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
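
For context on how a caller sees the change: with the new cgroup_assert_mutex_or_root_locked() check, for_each_subsys() may now be used while holding either cgroup_mutex or cgroup_root_mutex. Below is a minimal illustrative sketch, not part of the patch; it assumes the cgroup.c internals touched above and the function name is made up.

/*
 * Illustrative sketch only -- not part of the patch above.  Assumes the
 * cgroup.c internals this diff touches (cgroup_mutex, cgroup_root_mutex,
 * cgroup_subsys[], for_each_subsys()); the function name is hypothetical.
 */
static void cgroup_print_loaded_subsystems(void)
{
	struct cgroup_subsys *ss;
	int ssid;

	/* same nesting order as cgroup_load_subsys() in the patch */
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/*
	 * Holding either mutex satisfies the
	 * cgroup_assert_mutex_or_root_locked() check inside
	 * for_each_subsys().
	 */
	for_each_subsys(ss, ssid)
		pr_info("subsys %d: %s\n", ssid, ss->name);

	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
}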