  1. /*
  2. * f2fs shrinker support
  3. * the basic infra was copied from fs/ubifs/shrinker.c
  4. *
  5. * Copyright (c) 2015 Motorola Mobility
  6. * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/fs.h>
  13. #include <linux/f2fs_fs.h>
  14. #include "f2fs.h"
/* Global list of all mounted f2fs instances visible to the shrinker. */
static LIST_HEAD(f2fs_list);
/* Protects f2fs_list and traversal cursors in the shrinker callbacks. */
static DEFINE_SPINLOCK(f2fs_list_lock);
/* Monotonic scan-pass id; lets one scan visit each sbi at most once. */
static unsigned int shrinker_run_no;
  18. static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
  19. {
  20. return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
  21. }
  22. static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
  23. {
  24. if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
  25. return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
  26. return 0;
  27. }
  28. static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
  29. {
  30. return atomic_read(&sbi->total_zombie_tree) +
  31. atomic_read(&sbi->total_ext_node);
  32. }
/*
 * Shrinker ->count_objects callback: report the total number of
 * reclaimable objects (extent cache entries, clean NAT entries and
 * excess free nids) summed over every registered f2fs instance.
 *
 * Locking: f2fs_list_lock guards the list walk.  Before dropping it to
 * do the (possibly slow) counting, we take sbi->umount_mutex via
 * trylock; holding it keeps f2fs_put_super from tearing the sbi down,
 * so both the sbi and its list linkage stay valid while the spinlock
 * is released.  Instances whose trylock fails are simply skipped.
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		/* safe to drop: umount_mutex pins this sbi on the list */
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		/* re-take the list lock before advancing, then let go of sbi */
		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}
/*
 * Shrinker ->scan_objects callback: try to free up to sc->nr_to_scan
 * objects, draining extent cache entries first (half the budget), then
 * clean NAT entries, then free nids.  Returns the number freed.
 *
 * Each scan pass gets a fresh nonzero run id; an sbi already stamped
 * with the current id has been visited this pass, so the walk stops
 * there instead of looping forever after list_move_tail rotation.
 * The same trylock-on-umount_mutex dance as f2fs_shrink_count keeps
 * each sbi alive while the list lock is dropped.
 */
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	do {
		/* skip 0 so a wrapped counter never matches a fresh sbi */
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* already visited in this pass: the walk has come full circle */
		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* rotate to the tail so the next pass starts elsewhere */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}
/*
 * Register a newly mounted instance with the global shrinker list so
 * its caches become visible to f2fs_shrink_count/f2fs_shrink_scan.
 */
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}
/*
 * Unregister an instance from the shrinker list at unmount time.
 * The entire extent cache is drained first, before the sbi is removed
 * from f2fs_list.  NOTE(review): presumably called with
 * sbi->umount_mutex held by f2fs_put_super (the trylock in the scan
 * paths suggests so) — confirm against the caller.
 */
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}