// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}
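
/*
 * Consume every event queued on one CPU of the given buffer and check
 * that each entry is of a type the selftests can legitimately produce.
 */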
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer is of size trace_buf_size; if we loop
                 * more times than that, something is wrong with the
                 * ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&buf->tr->max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&buf->tr->max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
                trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                unsigned long pip,
                struct ftrace_ops *op,
                struct ftrace_regs *fregs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                unsigned long pip,
                struct ftrace_ops *op,
                struct ftrace_regs *fregs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                unsigned long pip,
                struct ftrace_ops *op,
                struct ftrace_regs *fregs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                unsigned long pip,
                struct ftrace_ops *op,
                struct ftrace_regs *fregs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                unsigned long pip,
                struct ftrace_ops *op,
                struct ftrace_regs *fregs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
        .func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
        .func = trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
                trace_selftest_test_probe1_cnt,
                trace_selftest_test_probe2_cnt,
                trace_selftest_test_probe3_cnt,
                trace_selftest_test_global_cnt,
                trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}
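
/*
 * Exercise DYN_FTRACE_TEST_NAME()/NAME2() with three filtered ftrace_ops
 * (plus one allocated dynamically) registered and verify that each probe
 * fires exactly as often as its filter allows.  With cnt > 1 the
 * trace_array's own ops are registered as well.
 */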
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        /* First time we are running with main function */
        if (cnt > 1) {
                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
                register_ftrace_function(tr->ops);
        }

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        /* Remove trace function from probe 3 */
        func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
        len1 = strlen(func1_name);

        ftrace_set_filter(&test_probe3, func1_name, len1, 0);

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 5)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        if (cnt > 1)
                unregister_ftrace_function(tr->ops);
        ftrace_reset_array_ops(tr);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                struct trace_array *tr,
                int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* passed in by parameter to fool gcc from optimizing */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(tr, 1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(tr, 2);

        return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                unsigned long pip,
                struct ftrace_ops *op,
                struct ftrace_regs *fregs)
{
        /*
         * This function is registered without the recursion safe flag.
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                unsigned long pip,
                struct ftrace_ops *op,
                struct ftrace_regs *fregs)
{
        /*
         * We said we would provide our own recursion. By calling
         * this function again, we should recurse back into this function
         * and count again. But this only happens if the arch supports
         * all of ftrace features and nothing else is using the function
         * tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func = trace_selftest_test_recursion_func,
        .flags = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
        .func = trace_selftest_test_recursion_safe_func,
};
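
/*
 * Call DYN_FTRACE_TEST_NAME() through both callbacks above: first check
 * that ftrace itself stops the recursion of test_rec_probe, then that
 * test_recsafe_probe, which handles its own recursion, is allowed to
 * recurse back exactly once.
 */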
static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        /*
         * Recursion allows for transitions between contexts,
         * and may call the callback twice.
         */
        if (trace_selftest_recursion_cnt != 1 &&
            trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called once (or twice) (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
 out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                unsigned long pip,
                struct ftrace_ops *op,
                struct ftrace_regs *fregs)
{
        struct pt_regs *regs = ftrace_get_regs(fregs);

        if (regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func = trace_selftest_test_regs_func,
        .flags = FTRACE_OPS_FL_SAVE_REGS,
};
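
/*
 * Verify FTRACE_OPS_FL_SAVE_REGS behaviour: with arch support the
 * callback must receive a non-NULL pt_regs; without it, registration
 * must fail unless FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED is also set.
 */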
static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
 out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                        DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }

                return 0;
        }

        return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
        .entryfunc = &trace_graph_entry_watchdog,
        .retfunc = &trace_graph_return,
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
noinline __noclone static void trace_direct_tramp(void) { }
#endif

/*
 * Pretty much the same as for the function tracer from which the selftest
 * has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                struct trace_array *tr)
{
        int ret;
        unsigned long count;
        char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                disable_tracing_selftest("recovering from a hang");
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        /* Need to also simulate the tr->reset to remove this fgraph_ops */
        tracing_stop_cmdline_record();
        unregister_ftrace_graph(&fgraph_ops);

        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /*
         * Register direct function together with graph tracer
         * and make sure we get graph trace.
         */
        ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
                        (unsigned long) trace_direct_tramp);
        if (ret)
                goto out;

        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        count = 0;

        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        unregister_ftrace_graph(&fgraph_ops);

        ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
                        (unsigned long) trace_direct_tramp);
        if (ret)
                goto out;

        tracing_start();

        if (!ret && !count) {
                ret = -1;
                goto out;
        }

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);
#endif

        /* Don't test dynamic tracing, the function tracer already did */
 out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
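
/*
 * The latency tracer selftests below all follow the same pattern:
 * briefly create the latency window (irqs and/or preemption disabled
 * around a udelay), stop tracing, then check both the live buffer and
 * the max buffer for entries.
 */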

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tr->max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        tracing_start();
 out_no_start:
        trace->reset(tr);
        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
        struct completion is_ready;
        int go;
};
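
/*
 * Kernel thread used by the wakeup selftest: it makes itself a
 * SCHED_DEADLINE task, signals that it is ready, then sleeps until the
 * test wakes it up (and finally until kthread_stop() is called).
 */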
static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
        struct wakeup_test_data *x = data;

        sched_setattr(current, &attr);

        /* Make it know we have a new prio */
        complete(&x->is_ready);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!x->go) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        complete(&x->is_ready);

        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        struct task_struct *p;
        struct wakeup_test_data data;
        unsigned long count;
        int ret;

        memset(&data, 0, sizeof(data));

        init_completion(&data.is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&data.is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&data.is_ready);

        data.go = 1;
        /* memory barrier is in the wake_up_process() */
        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&data.is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);

        trace->reset(tr);
        tracing_start();

        tr->max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */