trace_functions_graph.c

// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <[email protected]>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space in the
 * DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_hardirq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int ret;
	int cpu;

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
		/*
		 * Need to return 1 to have the return called
		 * that will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Stop here if tracing_threshold is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		ret = __trace_graph_entry(tr, trace, trace_ctx);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned int trace_ctx)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, trace_ctx);
	__trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int cpu;

	ftrace_graph_addr_finish(trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		__trace_graph_return(tr, trace, trace_ctx);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	ftrace_graph_addr_finish(trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static struct fgraph_ops funcgraph_thresh_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_thresh_return,
};

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&funcgraph_thresh_ops);
	else
		ret = register_ftrace_graph(&funcgraph_ops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	if (tracing_thresh)
		unregister_ftrace_graph(&funcgraph_thresh_ops);
	else
		unregister_ftrace_graph(&funcgraph_ops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;

	/*
	 * Context-switch trace line:
	 *  ------------------------------------------
	 *  | 1) migration/0--1 => sshd-1755
	 *  ------------------------------------------
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}
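
/*
 * Peek at the next event in the ring buffer: if it is the return event for
 * the same pid and function as @curr, this call is a leaf and can be
 * printed on a single line. Returns the matching return entry, or NULL if
 * the call is not a leaf.
 */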
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu | ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us | ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, " | ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, " ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, " ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "| ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
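
/*
 * Print a function entry event. When the matching return immediately
 * follows, render it as a leaf ("func();"); otherwise open a nested
 * block ("func() {"). A failed write is recorded in the per-iterator
 * data so the entry can be printed again on the next call.
 */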
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "						/* 4 spaces */
		"                 ";				/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
	seq_printf(s, "#%.*s||| / \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, " TIME ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, " REL TIME ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, " TASK/PID ");
	if (lat)
		seq_puts(s, "|||| ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, " DURATION ");
	seq_puts(s, " FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, " | ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, " | ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " | ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, " | | ");
	if (lat)
		seq_puts(s, "|||| ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, " | | ");
	seq_puts(s, " | | | |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
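
/*
 * Handler for writes to the "max_graph_depth" tracefs file; setting the
 * value to 0 is assumed to remove the depth limit again.
 */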
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);