/* lkdtm_usercopy.c */

/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

static size_t cache_size = 1024;
static struct kmem_cache *bad_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}

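/*
 * Builds a buffer in its own stack frame and leaks its address back to
 * the caller (laundered through trick_compiler()), so the caller ends
 * up holding a pointer outside its current stack frame.
 */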
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}

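/*
 * Performs a "good" usercopy entirely within the local stack frame,
 * then a "bad" one using either a pointer into a dead callee frame
 * (bad_frame == true) or a pointer that runs off the end of the task
 * stack (bad_frame == false). Direction is selected by to_user.
 */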
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

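/*
 * Copies between user memory and a heap object, first with the
 * object's actual allocation size, then with twice that size, which
 * the usercopy checks should reject.
 */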
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	const size_t size = 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user((void __user *)user_addr, one, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(one, (void __user *)user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

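/*
 * Copies between user memory and two slab objects of equal size: one
 * from kmalloc (which carries SLAB_USERCOPY) and one from bad_cache,
 * which was created without the flag and should trip the checks.
 */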
static void do_usercopy_heap_flag(bool to_user)
{
	unsigned long user_addr;
	unsigned char *good_buf = NULL;
	unsigned char *bad_buf = NULL;

	/* Make sure cache was prepared. */
	if (!bad_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate one buffer from each cache (kmalloc will have the
	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
	 */
	good_buf = kmalloc(cache_size, GFP_KERNEL);
	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
	if (!good_buf || !bad_buf) {
		pr_warn("Failed to allocate buffers from caches\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}

	memset(good_buf, 'A', cache_size);
	memset(bad_buf, 'B', cache_size);

	if (to_user) {
		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, good_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, bad_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
		if (copy_from_user(good_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
		if (copy_from_user(bad_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
	if (bad_buf)
		kmem_cache_free(bad_cache, bad_buf);
	kfree(good_buf);
}

/* Callable tests. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
{
	do_usercopy_heap_flag(true);
}

void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
{
	do_usercopy_heap_flag(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

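/*
 * Copies kernel rodata (test_text) to userspace, which should succeed,
 * then attempts to copy kernel text (starting at vm_mmap()) out as
 * well, which the usercopy checks should reject.
 */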
void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata\n");
	if (copy_to_user((void __user *)user_addr, test_text,
			 sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text\n");
	if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
				      0, NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(bad_cache);
}