|
@@ -42,6 +42,10 @@ static void list_lru_unregister(struct list_lru *lru)
|
|
|
#ifdef CONFIG_MEMCG_KMEM
|
|
|
static inline bool list_lru_memcg_aware(struct list_lru *lru)
|
|
|
{
|
|
|
+ /*
|
|
|
+ * This needs node 0 to always be present, even
|
|
|
+ * in systems supporting sparse NUMA ids.
|
|
|
+ */
|
|
|
return !!lru->node[0].memcg_lrus;
|
|
|
}
|
|
|
|
|
@@ -377,16 +381,20 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
|
|
|
{
|
|
|
int i;
|
|
|
|
|
|
- for (i = 0; i < nr_node_ids; i++) {
|
|
|
- if (!memcg_aware)
|
|
|
- lru->node[i].memcg_lrus = NULL;
|
|
|
- else if (memcg_init_list_lru_node(&lru->node[i]))
|
|
|
+ if (!memcg_aware)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ for_each_node(i) {
|
|
|
+ if (memcg_init_list_lru_node(&lru->node[i]))
|
|
|
goto fail;
|
|
|
}
|
|
|
return 0;
|
|
|
fail:
|
|
|
- for (i = i - 1; i >= 0; i--)
|
|
|
+ for (i = i - 1; i >= 0; i--) {
|
|
|
+ if (!lru->node[i].memcg_lrus)
|
|
|
+ continue;
|
|
|
memcg_destroy_list_lru_node(&lru->node[i]);
|
|
|
+ }
|
|
|
return -ENOMEM;
|
|
|
}
|
|
|
|
|
@@ -397,7 +405,7 @@ static void memcg_destroy_list_lru(struct list_lru *lru)
|
|
|
if (!list_lru_memcg_aware(lru))
|
|
|
return;
|
|
|
|
|
|
- for (i = 0; i < nr_node_ids; i++)
|
|
|
+ for_each_node(i)
|
|
|
memcg_destroy_list_lru_node(&lru->node[i]);
|
|
|
}
|
|
|
|
|
@@ -409,16 +417,20 @@ static int memcg_update_list_lru(struct list_lru *lru,
|
|
|
if (!list_lru_memcg_aware(lru))
|
|
|
return 0;
|
|
|
|
|
|
- for (i = 0; i < nr_node_ids; i++) {
|
|
|
+ for_each_node(i) {
|
|
|
if (memcg_update_list_lru_node(&lru->node[i],
|
|
|
old_size, new_size))
|
|
|
goto fail;
|
|
|
}
|
|
|
return 0;
|
|
|
fail:
|
|
|
- for (i = i - 1; i >= 0; i--)
|
|
|
+ for (i = i - 1; i >= 0; i--) {
|
|
|
+ if (!lru->node[i].memcg_lrus)
|
|
|
+ continue;
|
|
|
+
|
|
|
memcg_cancel_update_list_lru_node(&lru->node[i],
|
|
|
old_size, new_size);
|
|
|
+ }
|
|
|
return -ENOMEM;
|
|
|
}
|
|
|
|
|
@@ -430,7 +442,7 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
|
|
|
if (!list_lru_memcg_aware(lru))
|
|
|
return;
|
|
|
|
|
|
- for (i = 0; i < nr_node_ids; i++)
|
|
|
+ for_each_node(i)
|
|
|
memcg_cancel_update_list_lru_node(&lru->node[i],
|
|
|
old_size, new_size);
|
|
|
}
|
|
@@ -485,7 +497,7 @@ static void memcg_drain_list_lru(struct list_lru *lru,
|
|
|
if (!list_lru_memcg_aware(lru))
|
|
|
return;
|
|
|
|
|
|
- for (i = 0; i < nr_node_ids; i++)
|
|
|
+ for_each_node(i)
|
|
|
memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
|
|
|
}
|
|
|
|
|
@@ -522,7 +534,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
|
|
|
if (!lru->node)
|
|
|
goto out;
|
|
|
|
|
|
- for (i = 0; i < nr_node_ids; i++) {
|
|
|
+ for_each_node(i) {
|
|
|
spin_lock_init(&lru->node[i].lock);
|
|
|
if (key)
|
|
|
lockdep_set_class(&lru->node[i].lock, key);
|