quarantine.c

/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
};

#define QLIST_INIT	{ NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}
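
/* Append qlink to the tail of q and account its size. */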
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}
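
/* Splice all of *from onto the tail of *to and reinitialize *from. */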
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

#define QUARANTINE_PERCPU_SIZE	(1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_SPINLOCK(quarantine_lock);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine doesn't support the memory shrinker when used with the
 * SLAB allocator, so we keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION	32
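
/* Look up the kmem_cache that a quarantined object belongs to. */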
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}
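
/* Recover the object's address from the embedded kasan_free_meta. */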
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}
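
/*
 * Return one quarantined object to the allocator. SLAB's ___cache_free()
 * expects interrupts to be disabled, hence the local_irq_save()/restore()
 * pair around it.
 */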
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}
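
/*
 * Free every object on q. If cache is NULL, each object's cache is looked
 * up from its slab page instead.
 */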
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}
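
/*
 * Put a freed object into the per-cpu quarantine queue. When the per-cpu
 * queue overflows QUARANTINE_PERCPU_SIZE, its contents are moved into the
 * current batch of the global queue, and a new batch is started once the
 * current one reaches quarantine_batch_size.
 */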
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;

	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
		qlist_move_all(q, &temp);

	local_irq_restore(flags);

	if (unlikely(!qlist_empty(&temp))) {
		spin_lock_irqsave(&quarantine_lock, flags);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		spin_unlock_irqrestore(&quarantine_lock, flags);
	}
}
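
/*
 * If the global queue has grown past quarantine_max_size, evict its oldest
 * batch and actually free the objects in it. The size limits are recomputed
 * on each call so that memory hotplug is taken into account.
 */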
void quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
		2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
}
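
/*
 * Move every object belonging to cache from *from to *to, preserving the
 * relative order of the objects left behind on *from.
 */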
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}
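
/* Flush a dying cache's objects out of this CPU's quarantine queue. */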
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}
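
/*
 * Drop all objects belonging to cache from the quarantine, first from the
 * per-cpu queues and then from the global batches, so that no quarantined
 * object can outlive the cache itself (e.g. when it is being destroyed).
 */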
void quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	on_each_cpu(per_cpu_remove_cache, cache, 1);

	spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++)
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);
}