list_lru.c

/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
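
/*
 * When CONFIG_MEMCG is enabled (and slab accounting is available, i.e.
 * !CONFIG_SLOB), every list_lru is kept on a global list so that
 * memcg_update_all_list_lrus() and memcg_drain_all_list_lrus() can walk
 * them all when the per-memcg LRU arrays must be resized or drained.
 */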
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to be always present, even
	 * on systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per-cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
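
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * struct and field names are hypothetical): a cache whose objects embed
 * a list_head can track unused objects on a list_lru.
 *
 *	struct my_object {
 *		struct list_head lru;	// linked into the list_lru
 *		...
 *	};
 *
 *	// Object became unused: put it on the LRU. Returns false if it
 *	// was already on a list.
 *	list_lru_add(&my_lru, &obj->lru);
 *
 *	// Object is in use again: take it off the LRU before reusing or
 *	// freeing it. Returns false if it was not on a list.
 *	list_lru_del(&my_lru, &obj->lru);
 */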
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
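
/*
 * The two isolate helpers above are meant to be called from a
 * list_lru_walk_cb callback, which runs with the node's lru lock held
 * (see __list_lru_walk_one below), so they can safely unlink the item
 * and adjust nr_items without taking the lock themselves.
 */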
static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
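
/*
 * An illustrative isolate callback (a sketch, not part of this file;
 * my_object and my_object_is_busy are hypothetical). The argument types
 * match the call to isolate() in __list_lru_walk_one above.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock,
 *			void *cb_arg)
 *	{
 *		struct my_object *obj = container_of(item,
 *						struct my_object, lru);
 *
 *		if (my_object_is_busy(obj))
 *			return LRU_SKIP;
 *
 *		// Unlink the item; the lru lock is held by our caller.
 *		list_lru_isolate(list, item);
 *		return LRU_REMOVED;
 *	}
 */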
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}
static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/*
	 * Entries begin..i-1 were allocated; the destroy helper's 'end'
	 * is exclusive, so pass i (not i - 1) or the last allocated
	 * entry is leaked.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}
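
/*
 * Called when memcg_nr_cache_ids grows: resize the per-memcg LRU array
 * of every registered list_lru. On failure, updates already applied to
 * earlier lrus on the list are rolled back so that all arrays stay at
 * old_size.
 */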
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
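
/*
 * Callers normally reach __list_lru_init() through the wrappers in
 * <linux/list_lru.h>, e.g. (a sketch; my_lru is hypothetical):
 *
 *	static struct list_lru my_lru;
 *
 *	err = list_lru_init(&my_lru);		// not memcg aware
 *	err = list_lru_init_memcg(&my_lru);	// one list per memcg per node
 *	...
 *	list_lru_destroy(&my_lru);
 */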
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);
	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);