// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <linux/log2.h>
#include <asm/setup.h>

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4
/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is
 * allowed).
 */
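/*
 * For reference, a minimal user-space sketch of that sequence, adapted from
 * Documentation/dev-tools/kcov.rst (COVER_SIZE and the omitted error handling
 * are illustrative, not part of this file):
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	... issue the syscall(s) of interest ...
 *	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	... cover[1..n] now hold the PCs that were executed ...
 *	ioctl(fd, KCOV_DISABLE, 0);
 */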
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 *  - each code section for remote coverage collection
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
	/* Collecting coverage from remote (background) threads. */
	bool			remote;
	/* Size of remote area (in long's). */
	unsigned int		remote_size;
	/*
	 * Sequence is incremented each time kcov is reenabled, used by
	 * kcov_remote_stop(), see the comment there.
	 */
	int			sequence;
};

struct kcov_remote_area {
	struct list_head	list;
	unsigned int		size;
};

struct kcov_remote {
	u64			handle;
	struct kcov		*kcov;
	struct hlist_node	hnode;
};

static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);

struct kcov_percpu_data {
	void			*irq_area;

	unsigned int		saved_mode;
	unsigned int		saved_size;
	void			*saved_area;
	struct kcov		*saved_kcov;
	int			saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data);

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
	struct kcov_remote *remote;

	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
		if (remote->handle == handle)
			return remote;
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
	struct kcov_remote *remote;

	if (kcov_remote_find(handle))
		return ERR_PTR(-EEXIST);
	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote)
		return ERR_PTR(-ENOMEM);
	remote->handle = handle;
	remote->kcov = kcov;
	hash_add(kcov_remote_map, &remote->hnode, handle);
	return remote;
}

/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
{
	struct kcov_remote_area *area;
	struct list_head *pos;

	list_for_each(pos, &kcov_remote_areas) {
		area = list_entry(pos, struct kcov_remote_area, list);
		if (area->size == size) {
			list_del(&area->list);
			return area;
		}
	}
	return NULL;
}

/* Must be called with kcov_remote_lock locked. */
static void kcov_remote_area_put(struct kcov_remote_area *area,
				 unsigned int size)
{
	INIT_LIST_HEAD(&area->list);
	area->size = size;
	list_add(&area->list, &kcov_remote_areas);
}

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts, unless we are in a remote
	 * coverage collection section in a softirq.
	 */
	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_start().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
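/*
 * Roughly speaking, with CONFIG_KCOV the compiler is passed
 * -fsanitize-coverage=trace-pc and inserts a call to the hook above at the
 * start of every basic block, so instrumented kernel code such as
 *
 *	if (copy_from_user(&val, ptr, sizeof(val)))
 *		return -EFAULT;
 *
 * behaves approximately as if it were written as (illustrative sketch only):
 *
 *	__sanitizer_cov_trace_pc();
 *	if (copy_from_user(&val, ptr, sizeof(val))) {
 *		__sanitizer_cov_trace_pc();
 *		return -EFAULT;
 *	}
 */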
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);
	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}
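/*
 * In KCOV_TRACE_CMP mode the shared buffer therefore holds a u64 record
 * count followed by 4-word records. A sketch of how user space can walk it
 * ("cover" is the mmap()ed area, names are illustrative):
 *
 *	u64 *cover = ...;
 *	u64 n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (u64 i = 0; i < n; i++) {
 *		u64 type = cover[1 + i * KCOV_WORDS_PER_CMP];
 *		u64 arg1 = cover[1 + i * KCOV_WORDS_PER_CMP + 1];
 *		u64 arg2 = cover[1 + i * KCOV_WORDS_PER_CMP + 2];
 *		u64 ip   = cover[1 + i * KCOV_WORDS_PER_CMP + 3];
 *		... the KCOV_CMP_SIZE()/KCOV_CMP_CONST bits are in "type" ...
 *	}
 */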
void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_start(struct task_struct *t, struct kcov *kcov,
		       unsigned int size, void *area, enum kcov_mode mode,
		       int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	WRITE_ONCE(t->kcov_mode, mode);
}

static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}

static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}

void kcov_task_init(struct task_struct *t)
{
	kcov_task_reset(t);
	t->kcov_handle = current->kcov_handle;
}

static void kcov_reset(struct kcov *kcov)
{
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	kcov->remote = false;
	kcov->remote_size = 0;
	kcov->sequence++;
}

static void kcov_remote_reset(struct kcov *kcov)
{
	int bkt;
	struct kcov_remote *remote;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&kcov_remote_lock, flags);
	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
		if (remote->kcov != kcov)
			continue;
		hash_del(&remote->hnode);
		kfree(remote);
	}
	/* Do reset before unlock to prevent races with kcov_remote_start(). */
	kcov_reset(kcov);
	spin_unlock_irqrestore(&kcov_remote_lock, flags);
}

static void kcov_disable(struct task_struct *t, struct kcov *kcov)
{
	kcov_task_reset(t);
	if (kcov->remote)
		kcov_remote_reset(kcov);
	else
		kcov_reset(kcov);
}

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		kcov_remote_reset(kcov);
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;
	unsigned long flags;

	kcov = t->kcov;
	if (kcov == NULL)
		return;

	spin_lock_irqsave(&kcov->lock, flags);
	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
	/*
	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
	 * which comes down to:
	 *	WARN_ON(!kcov->remote && kcov->t != t);
	 *
	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
	 *
	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
	 *    In this case we should print a warning right away, since a task
	 *    shouldn't be exiting when it's in a kcov coverage collection
	 *    section. Here t points to the task that is collecting remote
	 *    coverage, and t->kcov->t points to the thread that created the
	 *    kcov device. Which means that to detect this case we need to
	 *    check that t != t->kcov->t, and this gives us the following:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
	 *    and then again we make sure that t->kcov->t == t:
	 *	WARN_ON(kcov->remote && kcov->t != t);
	 *
	 * By combining all three checks into one we get:
	 */
	if (WARN_ON(kcov->t != t)) {
		spin_unlock_irqrestore(&kcov->lock, flags);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_disable(t, kcov);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;
	unsigned long flags;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock_irqsave(&kcov->lock, flags);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock_irqrestore(&kcov->lock, flags);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock_irqrestore(&kcov->lock, flags);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	kcov->sequence = 1;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_get_mode(unsigned long arg)
{
	if (arg == KCOV_TRACE_PC)
		return KCOV_MODE_TRACE_PC;
	else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
		return KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
	else
		return -EINVAL;
}
/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static inline bool kcov_check_handle(u64 handle, bool common_valid,
				     bool uncommon_valid, bool zero_valid)
{
	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
		return false;
	switch (handle & KCOV_SUBSYSTEM_MASK) {
	case KCOV_SUBSYSTEM_COMMON:
		return (handle & KCOV_INSTANCE_MASK) ?
			common_valid : zero_valid;
	case KCOV_SUBSYSTEM_USB:
		return uncommon_valid;
	default:
		return false;
	}
	return false;
}
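/*
 * The handle layout checked above comes from uapi/linux/kcov.h: the top byte
 * selects the subsystem and the low 4 bytes the instance id. A sketch of how
 * a caller builds one (the instance id 0x42 is arbitrary):
 *
 *	u64 handle = kcov_remote_handle(KCOV_SUBSYSTEM_USB, 0x42);
 *	... passes kcov_check_handle(handle, false, true, false) ...
 */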
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;
	int mode, i;
	struct kcov_remote_arg *remote_arg;
	struct kcov_remote *remote;
	unsigned long flags;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only at
		 * task exit or voluntarily by KCOV_DISABLE. After that it can
		 * be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		mode = kcov_get_mode(arg);
		if (mode < 0)
			return mode;
		kcov_fault_in_area(kcov);
		kcov->mode = mode;
		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
			   kcov->sequence);
		kcov->t = t;
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_disable(t, kcov);
		kcov_put(kcov);
		return 0;
	case KCOV_REMOTE_ENABLE:
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		remote_arg = (struct kcov_remote_arg *)arg;
		mode = kcov_get_mode(remote_arg->trace_mode);
		if (mode < 0)
			return mode;
		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->mode = mode;
		t->kcov = kcov;
		t->kcov_mode = KCOV_MODE_REMOTE;
		kcov->t = t;
		kcov->remote = true;
		kcov->remote_size = remote_arg->area_size;
		spin_lock_irqsave(&kcov_remote_lock, flags);
		for (i = 0; i < remote_arg->num_handles; i++) {
			if (!kcov_check_handle(remote_arg->handles[i],
						false, true, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
		}
		if (remote_arg->common_handle) {
			if (!kcov_check_handle(remote_arg->common_handle,
						true, false, false)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return -EINVAL;
			}
			remote = kcov_remote_add(kcov,
					remote_arg->common_handle);
			if (IS_ERR(remote)) {
				spin_unlock_irqrestore(&kcov_remote_lock,
							flags);
				kcov_disable(t, kcov);
				return PTR_ERR(remote);
			}
			t->kcov_handle = remote_arg->common_handle;
		}
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;
	struct kcov_remote_arg *remote_arg = NULL;
	unsigned int remote_num_handles;
	unsigned long remote_arg_size;
	unsigned long flags;

	if (cmd == KCOV_REMOTE_ENABLE) {
		if (get_user(remote_num_handles, (unsigned __user *)(arg +
				offsetof(struct kcov_remote_arg, num_handles))))
			return -EFAULT;
		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
			return -EINVAL;
		remote_arg_size = struct_size(remote_arg, handles,
					remote_num_handles);
		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
		if (IS_ERR(remote_arg))
			return PTR_ERR(remote_arg);
		if (remote_arg->num_handles != remote_num_handles) {
			kfree(remote_arg);
			return -EINVAL;
		}
		arg = (unsigned long)remote_arg;
	}

	kcov = filep->private_data;
	spin_lock_irqsave(&kcov->lock, flags);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock_irqrestore(&kcov->lock, flags);
	kfree(remote_arg);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};
/*
 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 * of code in a kernel background thread or in a softirq to allow kcov to be
 * used to collect coverage from that part of code.
 *
 * The handle argument of kcov_remote_start() identifies a code section that is
 * used for coverage collection. A userspace process passes this handle to the
 * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
 * coverage for the code section identified by this handle.
 *
 * The usage of these annotations in the kernel code is different depending on
 * the type of the kernel thread whose code is being annotated.
 *
 * For global kernel threads that are spawned in a limited number of instances
 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 * instance id is then combined with a 1-byte subsystem id to get a handle via
 * kcov_remote_handle(subsystem_id, instance_id).
 *
 * For local kernel threads that are spawned from system call handlers when a
 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 * passed from a userspace process as the common_handle field of the
 * kcov_remote_arg struct (note that the user must generate a handle by using
 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 * arbitrary 4-byte non-zero number as the instance id). This common handle
 * then gets saved into the task_struct of the process that issued the
 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 * and passed to the spawned threads via custom annotations. Those kernel
 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 * kcov_remote_stop(). All of the threads that are spawned by the same process
 * obtain the same handle, hence the name "common".
 *
 * See Documentation/dev-tools/kcov.rst for more details.
 *
 * Internally, kcov_remote_start() looks up the kcov device associated with the
 * provided handle, allocates an area for coverage collection, and saves the
 * pointers to kcov and area into the current task_struct to allow coverage to
 * be collected via __sanitizer_cov_trace_pc().
 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 * collecting coverage and copies all collected coverage into the kcov area.
 */
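/*
 * For instance, a background worker using the "common" scheme looks roughly
 * like this (a sketch; the kcov_handle field and how it is plumbed through
 * the work item are up to each subsystem, not defined here):
 *
 *	// in the syscall handler that creates the worker:
 *	worker->kcov_handle = kcov_common_handle();
 *
 *	// in the worker thread, around the section of interest:
 *	kcov_remote_start(worker->kcov_handle);
 *	... the code whose coverage should be collected ...
 *	kcov_remote_stop();
 */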
static inline bool kcov_mode_enabled(unsigned int mode)
{
	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
}

static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}

static void kcov_remote_softirq_stop(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);

	if (data->saved_kcov) {
		kcov_start(t, data->saved_kcov, data->saved_size,
				data->saved_area, data->saved_mode,
				data->saved_sequence);
		data->saved_mode = 0;
		data->saved_size = 0;
		data->saved_area = NULL;
		data->saved_sequence = 0;
		data->saved_kcov = NULL;
	}
}

void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	if (!in_task() && !in_serving_softirq())
		return;

	local_irq_save(flags);
	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with kcov enabled).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_irq_restore(flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_irq_restore(flags);
		return;
	}
	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
			in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock_irqrestore(&kcov_remote_lock, flags);

	/* Can only happen when in_task(). */
	if (!area) {
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
	}

	local_irq_save(flags);

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kcov_remote_start);

static void kcov_move_area(enum kcov_mode mode, void *dst_area,
				unsigned int dst_area_size, void *src_area)
{
	u64 word_size = sizeof(unsigned long);
	u64 count_size, entry_size_log;
	u64 dst_len, src_len;
	void *dst_entries, *src_entries;
	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;

	kcov_debug("%px %u <= %px %lu\n",
		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		dst_len = READ_ONCE(*(unsigned long *)dst_area);
		src_len = *(unsigned long *)src_area;
		count_size = sizeof(unsigned long);
		entry_size_log = __ilog2_u64(sizeof(unsigned long));
		break;
	case KCOV_MODE_TRACE_CMP:
		dst_len = READ_ONCE(*(u64 *)dst_area);
		src_len = *(u64 *)src_area;
		count_size = sizeof(u64);
		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
		break;
	default:
		WARN_ON(1);
		return;
	}
	/* As ARM can't divide u64 integers, use the log of the entry size. */
	if (dst_len > ((dst_area_size * word_size - count_size) >>
				entry_size_log))
		return;
	dst_occupied = count_size + (dst_len << entry_size_log);
	dst_free = dst_area_size * word_size - dst_occupied;
	bytes_to_move = min(dst_free, src_len << entry_size_log);
	dst_entries = dst_area + dst_occupied;
	src_entries = src_area + count_size;
	memcpy(dst_entries, src_entries, bytes_to_move);
	entries_moved = bytes_to_move >> entry_size_log;

	switch (mode) {
	case KCOV_MODE_TRACE_PC:
		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
		break;
	case KCOV_MODE_TRACE_CMP:
		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
		break;
	default:
		break;
	}
}

/* See the comment before kcov_remote_start() for usage details. */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_irq_save(flags);

	mode = READ_ONCE(t->kcov_mode);
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_irq_restore(flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_irq_restore(flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_irq_restore(flags);
		return;
	}

	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		kcov_remote_softirq_stop(t);
	}

	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_irq_restore(flags);

	/* Get in kcov_remote_start(). */
	kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);

/* See the comment before kcov_remote_start() for usage details. */
u64 kcov_common_handle(void)
{
	return current->kcov_handle;
}
EXPORT_SYMBOL(kcov_common_handle);

static int __init kcov_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		void *area = vmalloc(CONFIG_KCOV_IRQ_AREA_SIZE *
				sizeof(unsigned long));
		if (!area)
			return -ENOMEM;
		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
	}

	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);