#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    is allowed); see the usage sketch below
 */
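
/*
 * A minimal user-space sketch of that lifecycle (illustrative only, not
 * part of the kernel build; it assumes debugfs is mounted at
 * /sys/kernel/debug, that the KCOV_* ioctl numbers come from the uapi
 * <linux/kcov.h> header, and COVER_SIZE is an arbitrary choice):
 *
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *      #include <linux/kcov.h>
 *
 *      #define COVER_SIZE (64 << 10)   // buffer size in unsigned longs
 *
 *      int main(void)
 *      {
 *              unsigned long *cover, n, i;
 *              // One fd collects coverage for one task at a time.
 *              int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *
 *              if (fd == -1)
 *                      exit(1);
 *              // Set trace mode and buffer size; must precede mmap().
 *              if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
 *                      exit(1);
 *              // Map the buffer shared between kernel and user space.
 *              cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *                           PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *              if (cover == MAP_FAILED)
 *                      exit(1);
 *              // Start collecting coverage for the current task.
 *              if (ioctl(fd, KCOV_ENABLE, 0))
 *                      exit(1);
 *              cover[0] = 0;           // reset the PC counter
 *              read(-1, NULL, 0);      // the syscall under test
 *              n = cover[0];           // number of PCs collected
 *              for (i = 0; i < n; i++)
 *                      printf("0x%lx\n", cover[i + 1]);
 *              ioctl(fd, KCOV_DISABLE, 0);  // allow reuse by another task
 *              close(fd);
 *              return 0;
 *      }
 */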

struct kcov {
        /*
         * Reference counter. We keep one for:
         *  - opened file descriptor
         *  - task with enabled coverage (we can't unwire it from another task)
         */
        atomic_t                refcount;
        /* The lock protects mode, size, area and t. */
        spinlock_t              lock;
        enum kcov_mode          mode;
        /* Size of the arena (in unsigned longs for KCOV_MODE_TRACE). */
        unsigned                size;
        /* Coverage buffer shared with user space. */
        void                    *area;
        /* Task for which we collect coverage, or NULL. */
        struct task_struct      *t;
};

/*
 * Entry point from instrumented code.
 * This is called once per basic block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
        struct task_struct *t;
        enum kcov_mode mode;

        t = current;
        /*
         * We are interested in code coverage as a function of syscall inputs,
         * so we ignore code executed in interrupts.
         * The checks for whether we are in an interrupt are open-coded, because:
         * 1. We can't use in_interrupt() here, since it also returns true
         *    when we are inside a local_bh_disable() section.
         * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
         *    since that leads to slower generated code (three separate tests,
         *    one for each of the flags).
         */
        if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
                                                        | NMI_MASK)))
                return;
        mode = READ_ONCE(t->kcov_mode);
        if (mode == KCOV_MODE_TRACE) {
                unsigned long *area;
                unsigned long pos;

                /*
                 * There is some code that runs in interrupts but for which
                 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
                 * READ_ONCE()/barrier() effectively provides a load-acquire
                 * wrt interrupts; it pairs with the barrier()/WRITE_ONCE()
                 * in kcov_ioctl_locked().
                 */
                barrier();
                area = t->kcov_area;
                /* The first word is the number of subsequent PCs. */
                pos = READ_ONCE(area[0]) + 1;
                if (likely(pos < t->kcov_size)) {
                        area[pos] = _RET_IP_;
                        WRITE_ONCE(area[0], pos);
                }
        }
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
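
/*
 * For reference: with CONFIG_KCOV the kernel is built with gcc's
 * -fsanitize-coverage=trace-pc, which inserts a call to the hook above
 * at the start of every basic block, roughly like this illustrative
 * sketch (foo() is a hypothetical instrumented function):
 *
 *      void foo(int x)
 *      {
 *              __sanitizer_cov_trace_pc();
 *              if (x) {
 *                      __sanitizer_cov_trace_pc();
 *                      ...
 *              }
 *      }
 *
 * The _RET_IP_ recorded above thus identifies the basic block by the
 * call site's return address.
 */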

static void kcov_get(struct kcov *kcov)
{
        atomic_inc(&kcov->refcount);
}

/* Drop a reference; free the descriptor and its buffer on the last put. */
static void kcov_put(struct kcov *kcov)
{
        if (atomic_dec_and_test(&kcov->refcount)) {
                vfree(kcov->area);
                kfree(kcov);
        }
}

void kcov_task_init(struct task_struct *t)
{
        t->kcov_mode = KCOV_MODE_DISABLED;
        t->kcov_size = 0;
        t->kcov_area = NULL;
        t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
        struct kcov *kcov;

        kcov = t->kcov;
        if (kcov == NULL)
                return;
        spin_lock(&kcov->lock);
        if (WARN_ON(kcov->t != t)) {
                spin_unlock(&kcov->lock);
                return;
        }
        /* Just to not leave dangling references behind. */
        kcov_task_init(t);
        kcov->t = NULL;
        spin_unlock(&kcov->lock);
        kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
        int res = 0;
        void *area;
        struct kcov *kcov = vma->vm_file->private_data;
        unsigned long size, off;
        struct page *page;

        area = vmalloc_user(vma->vm_end - vma->vm_start);
        if (!area)
                return -ENOMEM;

        spin_lock(&kcov->lock);
        size = kcov->size * sizeof(unsigned long);
        if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
            vma->vm_end - vma->vm_start != size) {
                res = -EINVAL;
                goto exit;
        }
        if (!kcov->area) {
                kcov->area = area;
                vma->vm_flags |= VM_DONTEXPAND;
                spin_unlock(&kcov->lock);
                for (off = 0; off < size; off += PAGE_SIZE) {
                        page = vmalloc_to_page(kcov->area + off);
                        if (vm_insert_page(vma, vma->vm_start + off, page))
                                WARN_ONCE(1, "vm_insert_page() failed");
                }
                return 0;
        }
exit:
        spin_unlock(&kcov->lock);
        vfree(area);
        return res;
}
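
/*
 * Note the sizing contract enforced above: the mapping must start at
 * file offset 0 and cover exactly size * sizeof(unsigned long) bytes,
 * where "size" is the value previously passed to KCOV_INIT_TRACE.
 * E.g. the matching user-space call from the sketch near the top:
 *
 *      cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *                   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Any other length or a non-zero offset fails with -EINVAL.
 */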

static int kcov_open(struct inode *inode, struct file *filep)
{
        struct kcov *kcov;

        kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
        if (!kcov)
                return -ENOMEM;
        atomic_set(&kcov->refcount, 1);
        spin_lock_init(&kcov->lock);
        filep->private_data = kcov;
        return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
        kcov_put(filep->private_data);
        return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
                             unsigned long arg)
{
        struct task_struct *t;
        unsigned long size, unused;

        switch (cmd) {
        case KCOV_INIT_TRACE:
                /*
                 * Enable kcov in trace mode and set up the buffer size.
                 * Must happen before anything else.
                 */
                if (kcov->mode != KCOV_MODE_DISABLED)
                        return -EBUSY;
                /*
                 * Size must be at least 2 to hold the current position and
                 * one PC. Later we allocate size * sizeof(unsigned long)
                 * bytes, which must not overflow.
                 */
                size = arg;
                if (size < 2 || size > INT_MAX / sizeof(unsigned long))
                        return -EINVAL;
                kcov->size = size;
                kcov->mode = KCOV_MODE_TRACE;
                return 0;
        case KCOV_ENABLE:
                /*
                 * Enable coverage for the current task.
                 * At this point the user must have enabled trace mode and
                 * mmapped the file. Coverage collection is disabled only
                 * at task exit or voluntarily via KCOV_DISABLE. After that
                 * it can be enabled for another task.
                 */
                unused = arg;
                if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
                    kcov->area == NULL)
                        return -EINVAL;
                if (kcov->t != NULL)
                        return -EBUSY;
                t = current;
                /* Cache in task struct for performance. */
                t->kcov_size = kcov->size;
                t->kcov_area = kcov->area;
                /* See comment in __sanitizer_cov_trace_pc(). */
                barrier();
                WRITE_ONCE(t->kcov_mode, kcov->mode);
                t->kcov = kcov;
                kcov->t = t;
                /* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
                kcov_get(kcov);
                return 0;
        case KCOV_DISABLE:
                /* Disable coverage for the current task. */
                unused = arg;
                if (unused != 0 || current->kcov != kcov)
                        return -EINVAL;
                t = current;
                if (WARN_ON(kcov->t != t))
                        return -EINVAL;
                kcov_task_init(t);
                kcov->t = NULL;
                kcov_put(kcov);
                return 0;
        default:
                return -ENOTTY;
        }
}
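
/*
 * Since KCOV_ENABLE binds the descriptor to the calling task only
 * (kcov->t above), coverage from other threads or processes is not
 * collected on this fd. A hypothetical fuzzer that forks workers would
 * therefore set up a separate kcov fd in each worker (sketch):
 *
 *      if (fork() == 0) {
 *              int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *              // ... KCOV_INIT_TRACE + mmap() + KCOV_ENABLE as above ...
 *      }
 */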

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
        struct kcov *kcov;
        int res;

        kcov = filep->private_data;
        spin_lock(&kcov->lock);
        res = kcov_ioctl_locked(kcov, cmd, arg);
        spin_unlock(&kcov->lock);
        return res;
}

static const struct file_operations kcov_fops = {
        .open           = kcov_open,
        .unlocked_ioctl = kcov_ioctl,
        .mmap           = kcov_mmap,
        .release        = kcov_close,
};

static int __init kcov_init(void)
{
        /*
         * The kcov debugfs file won't ever get removed and thus there
         * is no need to protect it against removal races. The use of
         * debugfs_create_file_unsafe() is actually safe here.
         */
        if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
                pr_err("failed to create kcov in debugfs\n");
                return -ENOMEM;
        }
        return 0;
}

device_initcall(kcov_init);