// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
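
/*
 * The option bits above are exposed through the func_opts[] table further
 * down, so while the function tracer is selected they show up in tracefs
 * (typically mounted at /sys/kernel/tracing) as the "func_stack_trace" and
 * "func-no-repeats" entries of trace_options and the options/ directory.
 */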

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
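
/*
 * Pick the callback that matches the currently set combination of the
 * stack-trace and no-repeats options.  An unexpected combination returns
 * NULL so the caller can reject it.
 */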
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}
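
/*
 * Lazily allocate the per-CPU trace_func_repeats state the first time the
 * no-repeats option is requested.  Returns false only if that allocation
 * fails; with the option clear there is nothing to do.
 */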
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
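
/*
 * Check whether this call repeats the previously recorded ip/parent_ip
 * pair.  If so, bump the per-CPU repeat counter (saturating at U16_MAX),
 * note the timestamp of the latest call, and tell the caller to skip
 * writing a new function entry.
 */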
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}
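
/*
 * Flush any pending repeat count as a single "last function repeated"
 * event and start tracking the new ip/parent_ip pair.
 */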
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;
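
/*
 * Toggle one of the function tracer's options.  When the function tracer
 * is the current tracer, the live callback is swapped by unregistering
 * tr->ops, updating ops->func, and registering it again.
 */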
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}
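
/*
 * The "function" tracer itself.  It is selected from user space with
 * something like "echo function > current_tracer" in the tracefs
 * directory (typically /sys/kernel/tracing) and is registered by
 * init_function_trace() at the bottom of this file.
 */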
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;
	} while (new_count != old_count);
}
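
/*
 * Decrement the probe's per-ip counter, if one exists.  Returns nonzero
 * while the probe should still fire; a missing counter means "unlimited".
 */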
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
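
/*
 * Allocate the ip -> counter mapper on first use and seed the counter for
 * this ip from the command's optional ":count" argument (passed in via
 * init_data).
 */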
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
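
/*
 * These probe_ops back the commands that can be written to
 * set_ftrace_filter, e.g.
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *	echo 'schedule:stacktrace:5' > set_ftrace_filter
 * The *_count_probe_ops variants are used when a ":count" limit is given.
 */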
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
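
/*
 * Register the traceon/traceoff/stacktrace/dump/cpudump commands above so
 * they can be used in set_ftrace_filter.  Each registration is unwound if
 * a later one fails.
 */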
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}