hw_breakpoint_test.c

// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test for hw_breakpoint constraints accounting logic.
 *
 * Copyright (C) 2022, Google LLC.
 */

#include <kunit/test.h>
#include <linux/cpumask.h>
#include <linux/hw_breakpoint.h>
#include <linux/kthread.h>
#include <linux/perf_event.h>
#include <asm/hw_breakpoint.h>

#define TEST_REQUIRES_BP_SLOTS(test, slots)						\
	do {										\
		if ((slots) > get_test_bp_slots()) {					\
			kunit_skip((test), "Requires breakpoint slots: %d > %d", slots,	\
				   get_test_bp_slots());				\
		}									\
	} while (0)

#define TEST_EXPECT_NOSPC(expr) KUNIT_EXPECT_EQ(test, -ENOSPC, PTR_ERR(expr))

#define MAX_TEST_BREAKPOINTS 512

static char break_vars[MAX_TEST_BREAKPOINTS];
static struct perf_event *test_bps[MAX_TEST_BREAKPOINTS];
static struct task_struct *__other_task;
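
/*
 * Registers a 1-byte read/write data breakpoint on &break_vars[idx], targeted
 * at @cpu (-1 for all CPUs) and/or @tsk (NULL for a CPU-target breakpoint).
 */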
static struct perf_event *register_test_bp(int cpu, struct task_struct *tsk, int idx)
{
	struct perf_event_attr attr = {};

	if (WARN_ON(idx < 0 || idx >= MAX_TEST_BREAKPOINTS))
		return NULL;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&break_vars[idx];
	attr.bp_len = HW_BREAKPOINT_LEN_1;
	attr.bp_type = HW_BREAKPOINT_RW;
	return perf_event_create_kernel_counter(&attr, cpu, tsk, NULL, NULL);
}

static void unregister_test_bp(struct perf_event **bp)
{
	if (WARN_ON(IS_ERR(*bp)))
		return;
	if (WARN_ON(!*bp))
		return;
	unregister_hw_breakpoint(*bp);
	*bp = NULL;
}
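
/* Returns the number of TYPE_DATA (data) breakpoint slots, cached on first use. */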
static int get_test_bp_slots(void)
{
	static int slots;

	if (!slots)
		slots = hw_breakpoint_slots(TYPE_DATA);

	return slots;
}
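
/* Registers one breakpoint for @cpu/@tsk at index *@id, records it in test_bps[], and advances *@id. */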
static void fill_one_bp_slot(struct kunit *test, int *id, int cpu, struct task_struct *tsk)
{
	struct perf_event *bp = register_test_bp(cpu, tsk, *id);

	KUNIT_ASSERT_NOT_NULL(test, bp);
	KUNIT_ASSERT_FALSE(test, IS_ERR(bp));
	KUNIT_ASSERT_NULL(test, test_bps[*id]);

	test_bps[(*id)++] = bp;
}

/*
 * Fills up the given @cpu/@tsk with breakpoints, only leaving @skip slots free.
 *
 * Returns true if this can be called again, continuing at @id.
 */
static bool fill_bp_slots(struct kunit *test, int *id, int cpu, struct task_struct *tsk, int skip)
{
	for (int i = 0; i < get_test_bp_slots() - skip; ++i)
		fill_one_bp_slot(test, id, cpu, tsk);

	return *id + get_test_bp_slots() <= MAX_TEST_BREAKPOINTS;
}

static int dummy_kthread(void *arg)
{
	return 0;
}
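
/*
 * Lazily creates a dummy kthread to use as a second, distinct task target;
 * the thread is never woken and is stopped again in test_exit().
 */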
static struct task_struct *get_other_task(struct kunit *test)
{
	struct task_struct *tsk;

	if (__other_task)
		return __other_task;

	tsk = kthread_create(dummy_kthread, NULL, "hw_breakpoint_dummy_task");
	KUNIT_ASSERT_FALSE(test, IS_ERR(tsk));
	__other_task = tsk;
	return __other_task;
}
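
/* Returns the @num'th online CPU (0-based). */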
static int get_test_cpu(int num)
{
	int cpu;

	WARN_ON(num < 0);

	for_each_online_cpu(cpu) {
		if (num-- <= 0)
			break;
	}

	return cpu;
}

/* ===== Test cases ===== */
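
/* Saturate a single CPU with CPU-target breakpoints; further task- or CPU-target breakpoints must fail. */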
static void test_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), NULL, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

static void test_many_cpus(struct kunit *test)
{
	int idx = 0;
	int cpu;

	/* Test that CPUs are independent. */
	for_each_online_cpu(cpu) {
		bool do_continue = fill_bp_slots(test, &idx, cpu, NULL, 0);

		TEST_EXPECT_NOSPC(register_test_bp(cpu, NULL, idx));
		if (!do_continue)
			break;
	}
}

static void test_one_task_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, -1, current, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Removing one and adding back a CPU-target breakpoint should work. */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}

static void test_two_tasks_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	/* Test that tasks are independent. */
	fill_bp_slots(test, &idx, -1, current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Removing one from the first task and adding back a CPU-target breakpoint should not work. */
	unregister_test_bp(&test_bps[0]);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

static void test_one_task_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/*
	 * Removing one and adding back a CPU-target breakpoint should work;
	 * this case is special vs. above because the task's constraints are
	 * CPU-dependent.
	 */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}

static void test_one_task_mixed(struct kunit *test)
{
	int idx = 0;

	TEST_REQUIRES_BP_SLOTS(test, 3);

	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_bp_slots(test, &idx, -1, current, 1);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));

	/* Transition from CPU-dependent pinned count to CPU-independent. */
	unregister_test_bp(&test_bps[0]);
	unregister_test_bp(&test_bps[1]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

static void test_two_tasks_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	fill_bp_slots(test, &idx, get_test_cpu(0), get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Can still create breakpoints on some other CPU. */
	fill_bp_slots(test, &idx, get_test_cpu(1), NULL, 0);
}

static void test_two_tasks_on_one_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Cannot create breakpoints on some other CPU either. */
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));
}

static void test_task_on_all_and_one_cpu(struct kunit *test)
{
	int tsk_on_cpu_idx, cpu_idx;
	int idx = 0;

	TEST_REQUIRES_BP_SLOTS(test, 3);

	fill_bp_slots(test, &idx, -1, current, 2);
	/* Transitioning from only all CPU breakpoints to mixed. */
	tsk_on_cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_one_bp_slot(test, &idx, -1, current);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));

	/* We should still be able to use up another CPU's slots. */
	cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(1), NULL);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));

	/* Transitioning back to task target on all CPUs. */
	unregister_test_bp(&test_bps[tsk_on_cpu_idx]);
	/* Still have a CPU target breakpoint in get_test_cpu(1). */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	/* Remove it and try again. */
	unregister_test_bp(&test_bps[cpu_idx]);
	fill_one_bp_slot(test, &idx, -1, current);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));
}

static struct kunit_case hw_breakpoint_test_cases[] = {
	KUNIT_CASE(test_one_cpu),
	KUNIT_CASE(test_many_cpus),
	KUNIT_CASE(test_one_task_on_all_cpus),
	KUNIT_CASE(test_two_tasks_on_all_cpus),
	KUNIT_CASE(test_one_task_on_one_cpu),
	KUNIT_CASE(test_one_task_mixed),
	KUNIT_CASE(test_two_tasks_on_one_cpu),
	KUNIT_CASE(test_two_tasks_on_one_all_cpus),
	KUNIT_CASE(test_task_on_all_and_one_cpu),
	{},
};

static int test_init(struct kunit *test)
{
	/* Most test cases want 2 distinct CPUs. */
	if (num_online_cpus() < 2)
		kunit_skip(test, "not enough cpus");

	/* Want the system to not use breakpoints elsewhere. */
	if (hw_breakpoint_is_used())
		kunit_skip(test, "hw breakpoint already in use");

	return 0;
}

static void test_exit(struct kunit *test)
{
	for (int i = 0; i < MAX_TEST_BREAKPOINTS; ++i) {
		if (test_bps[i])
			unregister_test_bp(&test_bps[i]);
	}

	if (__other_task) {
		kthread_stop(__other_task);
		__other_task = NULL;
	}

	/* Verify that internal state agrees that no breakpoints are in use. */
	KUNIT_EXPECT_FALSE(test, hw_breakpoint_is_used());
}

static struct kunit_suite hw_breakpoint_test_suite = {
	.name = "hw_breakpoint",
	.test_cases = hw_breakpoint_test_cases,
	.init = test_init,
	.exit = test_exit,
};

kunit_test_suites(&hw_breakpoint_test_suite);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Elver <[email protected]>");
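
/*
 * Build/run note (not part of the original file; the Kconfig symbol and the
 * exact command are assumptions based on common KUnit usage): the suite is
 * typically enabled via a symbol such as CONFIG_HW_BREAKPOINT_KUNIT_TEST and
 * run on an architecture with hardware breakpoints, e.g.:
 *
 *   ./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *       --kconfig_add CONFIG_HW_BREAKPOINT_KUNIT_TEST=y hw_breakpoint
 *
 * test_init() above skips the whole suite when fewer than two CPUs are online
 * or when hardware breakpoints are already in use.
 */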