/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
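
/*
 * Overview: a list_lru is split into one list_lru_node per NUMA node, each
 * protected by its own spinlock.  When the LRU is memcg aware (CONFIG_MEMCG
 * with a slab allocator other than SLOB), every node additionally carries an
 * RCU-managed array of per-memcg list_lru_one lists, indexed by memcg kmem
 * cache id.  Items are placed on the list matching the node and memcg of the
 * page that backs them.
 */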
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        /*
         * This needs node 0 to be always present, even
         * in the systems supporting sparse numa ids.
         */
        return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * Either lock or RCU protects the array of per cgroup lists
         * from relocation (see memcg_update_list_lru_node).
         */
        memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
                                           lockdep_is_held(&nlru->lock));
        if (memcg_lrus && idx >= 0)
                return memcg_lrus->lru[idx];
        return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
        struct page *page;

        if (!memcg_kmem_enabled())
                return NULL;
        page = virt_to_head_page(ptr);
        return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        struct mem_cgroup *memcg;

        if (!nlru->memcg_lrus)
                return &nlru->lru;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                return &nlru->lru;

        return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
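
/*
 * list_lru_add() only adds @item if it is not already on a list (i.e. the
 * list_head is empty), and list_lru_del() only removes it if it is; both
 * return true when they actually changed a list.  The node and, for
 * memcg-aware LRUs, the memcg list are derived from the page backing @item,
 * so the item must live in memory that virt_to_page() can resolve.
 */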
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_add_tail(item, &l->list);
                l->nr_items++;
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
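
/*
 * The two helpers below are meant to be called from a walk callback, i.e.
 * with the per-node lru lock already held.  They only adjust the per-list
 * nr_items counter; the walker itself decrements nlru->nr_items when the
 * callback reports LRU_REMOVED or LRU_REMOVED_RETRY.
 */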
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
static unsigned long __list_lru_count_one(struct list_lru *lru,
                                          int nid, int memcg_idx)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        rcu_read_lock();
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
        count = l->nr_items;
        rcu_read_unlock();

        return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
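
/*
 * Walk one list, calling @isolate on each item until either the list is
 * exhausted or *nr_to_walk items have been visited.  The callback runs with
 * the per-node lru lock held and tells the walker, via its lru_status return
 * value, whether the item was removed (LRU_REMOVED), removed after dropping
 * the lock (LRU_REMOVED_RETRY), rotated to the list tail (LRU_ROTATE),
 * skipped (LRU_SKIP), or should be retried because the lock was dropped
 * (LRU_RETRY).  Whenever the lock has been dropped, the traversal restarts
 * from the head of the list.
 */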
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
                                   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
                                        nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        isolated += __list_lru_walk_one(lru, nid, memcg_idx,
                                                        isolate, cb_arg,
                                                        nr_to_walk);
                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        /*
         * Entries [begin, i) were allocated successfully; passing "i - 1"
         * here would leak the last one.
         */
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
}
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        int size = memcg_nr_cache_ids;

        memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
                              size * sizeof(void *), GFP_KERNEL);
        if (!memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
                kvfree(memcg_lrus);
                return -ENOMEM;
        }
        RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

        return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * This is called when shrinker has already been unregistered,
         * and nobody can use it. So, there is no need to use kvfree_rcu().
         */
        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
        __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
        kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
        struct list_lru_memcg *mlru;

        mlru = container_of(head, struct list_lru_memcg, rcu);
        kvfree(mlru);
}
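
/*
 * Grow one node's per-memcg array from @old_size to @new_size entries.
 * This runs under list_lrus_mutex when the memcg kmem cache id space is
 * extended; readers either hold nlru->lock or sit in an RCU read-side
 * critical section, so the old array is only freed after a grace period.
 */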
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = rcu_dereference_protected(nlru->memcg_lrus,
                                        lockdep_is_held(&list_lrus_mutex));
        new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kvfree(new);
                return -ENOMEM;
        }

        memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

        /*
         * The locking below allows readers that hold nlru->lock to avoid
         * taking rcu_read_lock (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        rcu_assign_pointer(nlru->memcg_lrus, new);
        spin_unlock_irq(&nlru->lock);

        call_rcu(&old->rcu, kvfree_rcu);
        return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        struct list_lru_memcg *memcg_lrus;

        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
                                               lockdep_is_held(&list_lrus_mutex));
        /*
         * Do not bother shrinking the array back to the old size, because we
         * cannot handle allocation failures here.
         */
        __memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;

                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}
int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}
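
/*
 * Splice every item from the lists of memcg @src_idx onto the corresponding
 * lists of @dst_idx on all nodes of all registered LRUs.  This is used when
 * a kmem cache id is released, typically when a cgroup is taken offline and
 * its remaining entries are reparented rather than lost.
 */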
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
                                      int src_idx, int dst_idx)
{
        struct list_lru_one *src, *dst;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        dst->nr_items += src->nr_items;
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, int dst_idx)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_idx);
        mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;
        int err = -ENOMEM;

        memcg_get_cache_ids();

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                /* Do this so a list_lru_destroy() doesn't crash: */
                lru->node = NULL;
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();

        list_lru_unregister(lru);

        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
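
/*
 * Illustrative sketch, not part of the original file: a minimal,
 * hypothetical user of the API above.  All "demo" names are invented for
 * the example; only the list_lru calls themselves come from this file and
 * <linux/list_lru.h>.  An object embeds a list_head, is parked on the LRU
 * with list_lru_add() when it becomes idle, pulled back off with
 * list_lru_del() on reuse, and reclaimed by walking the per-node list with
 * an isolate callback that moves victims onto a private dispose list.  The
 * LRU itself would be set up with list_lru_init(&demo_lru) and torn down
 * with list_lru_destroy(&demo_lru).
 */
struct demo_object {
        struct list_head lru_link;      /* linkage used by the list_lru */
        /* ... payload ... */
};

static struct list_lru demo_lru;

/* Walk callback: called with the per-node lru lock held. */
static enum lru_status demo_isolate(struct list_head *item,
                                    struct list_lru_one *list,
                                    spinlock_t *lock, void *cb_arg)
{
        struct list_head *dispose = cb_arg;

        /* Detach the item onto the caller's dispose list and account it. */
        list_lru_isolate_move(list, item, dispose);
        return LRU_REMOVED;
}

/* Reclaim up to @nr_to_scan idle objects from NUMA node @nid. */
static void demo_shrink_node(int nid, unsigned long nr_to_scan)
{
        LIST_HEAD(dispose);
        struct demo_object *obj, *next;

        list_lru_walk_node(&demo_lru, nid, demo_isolate, &dispose,
                           &nr_to_scan);

        /* Free the victims outside of the lru lock. */
        list_for_each_entry_safe(obj, next, &dispose, lru_link)
                kfree(obj);
}

/* Hot paths: park an idle object on the LRU, take it back off on reuse. */
static void demo_object_idle(struct demo_object *obj)
{
        list_lru_add(&demo_lru, &obj->lru_link);
}

static void demo_object_reused(struct demo_object *obj)
{
        list_lru_del(&demo_lru, &obj->lru_link);
}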