trace_irqsoff.c

// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF    = (1 << 1),
        TRACER_PREEMPT_OFF = (1 << 2),
};
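/*
 * trace_type selects which critical sections are timed: the irqsoff,
 * preemptoff and preemptirqsoff tracers set TRACER_IRQS_OFF,
 * TRACER_PREEMPT_OFF, or both in their init functions below.
 */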
static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}
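/*
 * Note: the per-cpu data->disabled counter also acts as a re-entrancy
 * guard; if the tracer is invoked recursively while an event is being
 * recorded on this CPU, the nested call sees disabled > 1 and bails out.
 */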
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_ctx = tracing_gen_ctx_flags(flags);

        trace_function(tr, ip, parent_ip, trace_ctx);

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;
        int ret;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        trace_ctx = tracing_gen_ctx_flags(flags);
        ret = __trace_graph_entry(tr, trace, trace_ctx);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;

        ftrace_graph_addr_finish(trace);

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_ctx = tracing_gen_ctx_flags(flags);
        __trace_graph_return(tr, trace, trace_ctx);
        atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
        .entryfunc = &irqsoff_graph_entry,
        .retfunc   = &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
        else
                iter->private = NULL;
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_REL_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned int trace_ctx)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, trace_ctx);
        else
                trace_function(tr, ip, parent_ip, trace_ctx);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}
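/*
 * Illustrative behaviour: with tracing_thresh unset, only a critical
 * section longer than the current tr->max_latency is recorded, so the
 * trace always holds the worst case seen so far. With tracing_thresh
 * set (e.g. via the tracing_thresh tracefs file), every section that
 * meets the threshold is reported, regardless of the old maximum.
 */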
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        u64 T0, T1, delta;
        unsigned long flags;
        unsigned int trace_ctx;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        trace_ctx = tracing_gen_ctx();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, trace_ctx, 5);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}
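/*
 * Note: report_latency() is checked twice above on purpose - once as a
 * cheap, lock-free early exit, and again under max_trace_lock in case
 * another CPU recorded a larger maximum in the meantime.
 */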
static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->array_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        __trace_function(tr, ip, parent_ip, tracing_gen_ctx());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned int trace_ctx;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->array_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        trace_ctx = tracing_gen_ctx();
        __trace_function(tr, ip, parent_ip, trace_ctx);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}
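/*
 * Per-CPU critical-section lifecycle (sketch):
 *
 *   irqs/preemption disabled   -> start_critical_timing()
 *      stamps data->preempt_timestamp and sets tracing_cpu
 *   irqs/preemption re-enabled -> stop_critical_timing()
 *      -> check_critical_timing() computes the delta and, if it is a
 *         new maximum (see report_latency()), snapshots the trace via
 *         update_max_tr_single().
 */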
/*
 * start and stop critical timings, used to suspend the measurement
 * over sections (such as the idle path) that should not count as latency
 */
void start_critical_timings(void)
{
        if (preempt_trace(preempt_count()) || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace(preempt_count()) || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&fgraph_ops);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph(&fgraph_ops);
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
        /* without pause, we will produce garbage if another latency occurs */
        set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
        int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace(preempt_count()) && irq_trace())
                stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace(preempt_count()) && irq_trace())
                start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace(preempt_count()) && !irq_trace())
                stop_critical_timing(a0, a1);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace(preempt_count()) && !irq_trace())
                start_critical_timing(a0, a1);
}
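/*
 * Note for the combined preemptirqsoff tracer: both the hardirq and the
 * preempt hooks above are active.  The !irq_trace() / !preempt_trace()
 * checks make the timing start when the first form of protection is
 * taken and stop only when the last one is released, so overlapping
 * irq-off and preempt-off regions are measured as a single critical
 * section.
 */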
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = preemptoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = preemptirqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
        register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
        register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
        register_tracer(&preemptirqsoff_tracer);
#endif

        return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
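/*
 * Illustrative usage from userspace (assuming tracefs is mounted at
 * /sys/kernel/tracing and the kernel was built with CONFIG_IRQSOFF_TRACER):
 *
 *   # echo 0 > /sys/kernel/tracing/tracing_max_latency
 *   # echo irqsoff > /sys/kernel/tracing/current_tracer
 *   ... run a workload ...
 *   # cat /sys/kernel/tracing/tracing_max_latency
 *   # cat /sys/kernel/tracing/trace
 *
 * The trace then shows the longest irqs-off critical section observed
 * since tracing_max_latency was reset.
 */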