kcov.c

#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    is allowed); a user-space sketch of this sequence follows below.
 */
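/*
 * For illustration, a minimal (untested) user-space sketch of that
 * sequence. The KCOV_* commands come from the uapi header <linux/kcov.h>,
 * COVER_SIZE is an arbitrary choice, and error handling is omitted:
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;			// word 0 holds the number of PCs
 *	read(-1, NULL, 0);		// the syscall to collect coverage for
 *	for (unsigned long i = 0; i < cover[0]; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 *	ioctl(fd, KCOV_DISABLE, 0);
 */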
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of the arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
/*
 * Entry point from instrumented code.
 * This is called once per basic block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; the paired barrier()/WRITE_ONCE() are in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
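/*
 * For reference: with GCC's -fsanitize-coverage=trace-pc, which this hook
 * is written for, the compiler conceptually rewrites every basic block
 *
 *	if (cond)
 *		do_something();
 *
 * into roughly
 *
 *	if (cond) {
 *		__sanitizer_cov_trace_pc();
 *		do_something();
 *	}
 *
 * so _RET_IP_ above identifies the instrumented block that called us.
 */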
static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just so we don't leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}
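/*
 * For reference, the command values handled below are defined in the uapi
 * header (include/uapi/linux/kcov.h); at the time of writing they are
 * encoded as:
 *
 *	#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
 *	#define KCOV_ENABLE		_IO('c', 100)
 *	#define KCOV_DISABLE		_IO('c', 101)
 */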
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes of memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};
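/*
 * Note: with debugfs mounted at its conventional location, the control file
 * created below appears as /sys/kernel/debug/kcov, e.g. after:
 *
 *	mount -t debugfs none /sys/kernel/debug
 */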
static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);