
// SPDX-License-Identifier: GPL-2.0
/*
 * trace_boot.c
 * Tracing kernel boot-time
 */
#define pr_fmt(fmt)	"trace_boot: " fmt

#include <linux/bootconfig.h>
#include <linux/cpumask.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/trace.h>
#include <linux/trace_events.h>

#include "trace.h"

#define MAX_BUF_LEN 256

static void __init
trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char buf[MAX_BUF_LEN];
	unsigned long v = 0;

	/* Common ftrace options */
	xbc_node_for_each_array_value(node, "options", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (trace_set_options(tr, buf) < 0)
			pr_err("Failed to set option: %s\n", buf);
	}

	p = xbc_node_find_value(node, "tracing_on", NULL);
	if (p && *p != '\0') {
		if (kstrtoul(p, 10, &v))
			pr_err("Failed to set tracing on: %s\n", p);
		if (v)
			tracer_tracing_on(tr);
		else
			tracer_tracing_off(tr);
	}

	p = xbc_node_find_value(node, "trace_clock", NULL);
	if (p && *p != '\0') {
		if (tracing_set_clock(tr, p) < 0)
			pr_err("Failed to set trace clock: %s\n", p);
	}

	p = xbc_node_find_value(node, "buffer_size", NULL);
	if (p && *p != '\0') {
		v = memparse(p, NULL);
		if (v < PAGE_SIZE)
			pr_err("Buffer size is too small: %s\n", p);
		if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
			pr_err("Failed to resize trace buffer to %s\n", p);
	}

	p = xbc_node_find_value(node, "cpumask", NULL);
	if (p && *p != '\0') {
		cpumask_var_t new_mask;

		if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
			if (cpumask_parse(p, new_mask) < 0 ||
			    tracing_set_cpumask(tr, new_mask) < 0)
				pr_err("Failed to set new CPU mask %s\n", p);
			free_cpumask_var(new_mask);
		}
	}
}
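
/*
 * Illustrative bootconfig consumed by trace_boot_set_instance_options()
 * above (key names match the lookups in that function; the values are
 * examples only):
 *
 *	ftrace.options = sym-addr, stacktrace
 *	ftrace.tracing_on = 1
 *	ftrace.trace_clock = global
 *	ftrace.buffer_size = 1MB
 *	ftrace.cpumask = f	# hex mask parsed by cpumask_parse()
 */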
#ifdef CONFIG_EVENT_TRACING
static void __init
trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;

	xbc_node_for_each_array_value(node, "events", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (ftrace_set_clr_event(tr, buf, 1) < 0)
			pr_err("Failed to enable event: %s\n", p);
	}
}
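
/*
 * Illustrative event list for the enabler above (event names are
 * examples only; ftrace_set_clr_event() also accepts wildcards):
 *
 *	ftrace.events = "sched:sched_switch", "irq:*"
 */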
#ifdef CONFIG_KPROBE_EVENTS
static int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *val;
	int ret = 0;

	xbc_node_for_each_array_value(node, "probes", anode, val) {
		kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

		ret = kprobe_event_gen_cmd_start(&cmd, event, val);
		if (ret) {
			pr_err("Failed to generate probe: %s\n", buf);
			break;
		}

		ret = kprobe_event_gen_cmd_end(&cmd);
		if (ret) {
			pr_err("Failed to add probe: %s\n", buf);
			break;
		}
	}

	return ret;
}
#else
static inline int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	pr_err("Kprobe event is not supported.\n");
	return -ENOTSUPP;
}
#endif
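
/*
 * Illustrative kprobe event definition handled above (the probe spec is
 * an example only):
 *
 *	ftrace.event.kprobes.myprobe.probes = "vfs_read $arg1 $arg2"
 */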
#ifdef CONFIG_SYNTH_EVENTS
static int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;
	int ret;

	synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

	ret = synth_event_gen_cmd_start(&cmd, event, NULL);
	if (ret)
		return ret;

	xbc_node_for_each_array_value(node, "fields", anode, p) {
		ret = synth_event_add_field_str(&cmd, p);
		if (ret)
			return ret;
	}

	ret = synth_event_gen_cmd_end(&cmd);
	if (ret < 0)
		pr_err("Failed to add synthetic event: %s\n", buf);

	return ret;
}
#else
static inline int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	pr_err("Synthetic event is not supported.\n");
	return -ENOTSUPP;
}
#endif
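
/*
 * Illustrative synthetic event definition handled above (the event name
 * and field list are examples only):
 *
 *	ftrace.event.synthetic.initcall_latency.fields = "unsigned long func", "u64 lat"
 */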
#ifdef CONFIG_HIST_TRIGGERS
static int __init __printf(3, 4)
append_printf(char **bufp, char *end, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (*bufp == end)
		return -ENOSPC;

	va_start(args, fmt);
	ret = vsnprintf(*bufp, end - *bufp, fmt, args);
	if (ret < end - *bufp) {
		*bufp += ret;
	} else {
		*bufp = end;
		ret = -ERANGE;
	}
	va_end(args);

	return ret;
}

static int __init
append_str_nospace(char **bufp, char *end, const char *str)
{
	char *p = *bufp;
	int len;

	while (p < end - 1 && *str != '\0') {
		if (!isspace(*str))
			*(p++) = *str;
		str++;
	}
	*p = '\0';
	if (p == end - 1) {
		*bufp = end;
		return -ENOSPC;
	}
	len = p - *bufp;
	*bufp = p;

	return (int)len;
}
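
/*
 * Illustrative: append_str_nospace() drops whitespace while copying, so a
 * bootconfig expression such as "common_pid == 1" is appended as
 * "common_pid==1" (hist expressions must not contain spaces).
 */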
static int __init
trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp,
			  char *end, const char *key)
{
	struct xbc_node *anode;
	const char *p;
	char sep;

	p = xbc_node_find_value(hnode, key, &anode);
	if (p) {
		if (!anode) {
			pr_err("hist.%s requires value(s).\n", key);
			return -EINVAL;
		}

		append_printf(bufp, end, ":%s", key);
		sep = '=';
		xbc_array_for_each_value(anode, p) {
			append_printf(bufp, end, "%c%s", sep, p);
			if (sep == '=')
				sep = ',';
		}
	} else
		return -ENOENT;

	return 0;
}

static int __init
trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp,
				char *end, const char *handler,
				const char *param)
{
	struct xbc_node *knode, *anode;
	const char *p;
	char sep;

	/* Compose 'handler' parameter */
	p = xbc_node_find_value(hnode, param, NULL);
	if (!p) {
		pr_err("hist.%s requires '%s' option.\n",
		       xbc_node_get_data(hnode), param);
		return -EINVAL;
	}
	append_printf(bufp, end, ":%s(%s)", handler, p);

	/* Compose 'action' parameter */
	knode = xbc_node_find_subkey(hnode, "trace");
	if (!knode)
		knode = xbc_node_find_subkey(hnode, "save");
	if (knode) {
		anode = xbc_node_get_child(knode);
		if (!anode || !xbc_node_is_value(anode)) {
			pr_err("hist.%s.%s requires value(s).\n",
			       xbc_node_get_data(hnode),
			       xbc_node_get_data(knode));
			return -EINVAL;
		}

		append_printf(bufp, end, ".%s", xbc_node_get_data(knode));
		sep = '(';
		xbc_array_for_each_value(anode, p) {
			append_printf(bufp, end, "%c%s", sep, p);
			if (sep == '(')
				sep = ',';
		}
		append_printf(bufp, end, ")");
	} else if (xbc_node_find_subkey(hnode, "snapshot")) {
		append_printf(bufp, end, ".snapshot()");
	} else {
		pr_err("hist.%s requires an action.\n",
		       xbc_node_get_data(hnode));
		return -EINVAL;
	}

	return 0;
}

static int __init
trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp,
			     char *end, const char *param)
{
	struct xbc_node *node;
	const char *p, *handler;
	int ret = 0;

	handler = xbc_node_get_data(hnode);

	xbc_node_for_each_subkey(hnode, node) {
		p = xbc_node_get_data(node);
		if (!isdigit(p[0]))
			continue;
		/* All nodes starting with a digit should be instances. */
		ret = trace_boot_hist_add_one_handler(node, bufp, end, handler, param);
		if (ret < 0)
			break;
	}
	if (xbc_node_find_subkey(hnode, param))
		ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param);

	return ret;
}

/*
 * Histogram boottime tracing syntax.
 *
 * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist[.N] {
 *	keys = <KEY>[,...]
 *	values = <VAL>[,...]
 *	sort = <SORT-KEY>[,...]
 *	size = <ENTRIES>
 *	name = <HISTNAME>
 *	var { <VAR> = <EXPR> ... }
 *	pause|continue|clear
 *	onmax|onchange[.N] { var = <VAR>; <ACTION> [= <PARAM>] }
 *	onmatch[.N] { event = <EVENT>; <ACTION> [= <PARAM>] }
 *	filter = <FILTER>
 * }
 *
 * Where <ACTION> is one of:
 *
 *	trace = <EVENT>, <ARG1>[, ...]
 *	save = <ARG1>[, ...]
 *	snapshot
 */
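
/*
 * For example, the following (illustrative) bootconfig composes the
 * trigger string "hist:keys=pid:values=runtime:sort=runtime" for the
 * sched_stat_runtime event:
 *
 *	ftrace.event.sched.sched_stat_runtime.hist {
 *		keys = pid
 *		values = runtime
 *		sort = runtime
 *	}
 */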
static int __init
trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size)
{
	struct xbc_node *node, *knode;
	char *end = buf + size;
	const char *p;
	int ret = 0;

	append_printf(&buf, end, "hist");

	ret = trace_boot_hist_add_array(hnode, &buf, end, "keys");
	if (ret < 0) {
		if (ret == -ENOENT)
			pr_err("hist requires keys.\n");
		return -EINVAL;
	}

	ret = trace_boot_hist_add_array(hnode, &buf, end, "values");
	if (ret == -EINVAL)
		return ret;
	ret = trace_boot_hist_add_array(hnode, &buf, end, "sort");
	if (ret == -EINVAL)
		return ret;

	p = xbc_node_find_value(hnode, "size", NULL);
	if (p)
		append_printf(&buf, end, ":size=%s", p);

	p = xbc_node_find_value(hnode, "name", NULL);
	if (p)
		append_printf(&buf, end, ":name=%s", p);

	node = xbc_node_find_subkey(hnode, "var");
	if (node) {
		xbc_node_for_each_key_value(node, knode, p) {
			/* Expression must not include spaces. */
			append_printf(&buf, end, ":%s=",
				      xbc_node_get_data(knode));
			append_str_nospace(&buf, end, p);
		}
	}

	/* Histogram control attributes (mutually exclusive) */
	if (xbc_node_find_value(hnode, "pause", NULL))
		append_printf(&buf, end, ":pause");
	else if (xbc_node_find_value(hnode, "continue", NULL))
		append_printf(&buf, end, ":continue");
	else if (xbc_node_find_value(hnode, "clear", NULL))
		append_printf(&buf, end, ":clear");

	/* Histogram handler and actions */
	node = xbc_node_find_subkey(hnode, "onmax");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
		return -EINVAL;
	node = xbc_node_find_subkey(hnode, "onchange");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
		return -EINVAL;
	node = xbc_node_find_subkey(hnode, "onmatch");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0)
		return -EINVAL;

	p = xbc_node_find_value(hnode, "filter", NULL);
	if (p)
		append_printf(&buf, end, " if %s", p);

	if (buf == end) {
		pr_err("hist exceeds the max command length.\n");
		return -E2BIG;
	}

	return 0;
}

static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	struct xbc_node *node;
	const char *p;
	char *tmp;

	xbc_node_for_each_subkey(hnode, node) {
		p = xbc_node_get_data(node);
		if (!isdigit(p[0]))
			continue;
		/* All nodes starting with a digit should be instances. */
		if (trace_boot_compose_hist_cmd(node, buf, size) == 0) {
			tmp = kstrdup(buf, GFP_KERNEL);
			if (!tmp)
				return;
			if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply hist trigger: %s\n", tmp);
			kfree(tmp);
		}
	}
	if (xbc_node_find_subkey(hnode, "keys")) {
		if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) {
			tmp = kstrdup(buf, GFP_KERNEL);
			if (!tmp)
				return;
			if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply hist trigger: %s\n", tmp);
			kfree(tmp);
		}
	}
}
#else
static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	/* do nothing */
}
#endif

static void __init
trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
			  struct xbc_node *enode)
{
	struct trace_event_file *file;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p, *group, *event;

	group = xbc_node_get_data(gnode);
	event = xbc_node_get_data(enode);

	if (!strcmp(group, "kprobes"))
		if (trace_boot_add_kprobe_event(enode, event) < 0)
			return;
	if (!strcmp(group, "synthetic"))
		if (trace_boot_add_synth_event(enode, event) < 0)
			return;

	mutex_lock(&event_mutex);
	file = find_event_file(tr, group, event);
	if (!file) {
		pr_err("Failed to find event: %s:%s\n", group, event);
		goto out;
	}

	p = xbc_node_find_value(enode, "filter", NULL);
	if (p && *p != '\0') {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
			pr_err("filter string is too long: %s\n", p);
		else if (apply_event_filter(file, buf) < 0)
			pr_err("Failed to apply filter: %s\n", buf);
	}

	if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
		xbc_node_for_each_array_value(enode, "actions", anode, p) {
			if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
				pr_err("action string is too long: %s\n", p);
			else if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply an action: %s\n", p);
		}
		anode = xbc_node_find_subkey(enode, "hist");
		if (anode)
			trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf));
	} else if (xbc_node_find_value(enode, "actions", NULL))
		pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");

	if (xbc_node_find_value(enode, "enable", NULL)) {
		if (trace_event_enable_disable(file, 1, 0) < 0)
			pr_err("Failed to enable event node: %s:%s\n",
			       group, event);
	}
out:
	mutex_unlock(&event_mutex);
}
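
/*
 * Illustrative per-event settings handled above (event, filter, and
 * trigger values are examples only):
 *
 *	ftrace.event.sched.sched_switch.enable
 *	ftrace.event.sched.sched_switch.filter = "prev_prio < 100"
 *	ftrace.event.sched.sched_switch.actions = "stacktrace"
 */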
static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *gnode, *enode;
	bool enable, enable_all = false;
	const char *data;

	node = xbc_node_find_subkey(node, "event");
	if (!node)
		return;
	/* per-event key starts with "event.GROUP.EVENT" */
	xbc_node_for_each_subkey(node, gnode) {
		data = xbc_node_get_data(gnode);
		if (!strcmp(data, "enable")) {
			enable_all = true;
			continue;
		}
		enable = false;
		xbc_node_for_each_subkey(gnode, enode) {
			data = xbc_node_get_data(enode);
			if (!strcmp(data, "enable")) {
				enable = true;
				continue;
			}
			trace_boot_init_one_event(tr, gnode, enode);
		}
		/* Event enablement must be done after event settings */
		if (enable) {
			data = xbc_node_get_data(gnode);
			trace_array_set_clr_event(tr, data, NULL, true);
		}
	}
	/* Ditto */
	if (enable_all)
		trace_array_set_clr_event(tr, NULL, NULL, true);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
#define trace_boot_init_events(tr, node) do {} while (0)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static void __init
trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char *q;

	xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
	xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace notrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
}
#else
#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
#endif
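
/*
 * Illustrative function filter setup handled above (the patterns are
 * examples only):
 *
 *	ftrace.ftrace.filters = "kmem_cache_*", "vfs_*"
 *	ftrace.ftrace.notraces = "kmem_cache_free"
 */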
static void __init
trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
{
	const char *p;

	trace_boot_set_ftrace_filter(tr, node);

	p = xbc_node_find_value(node, "tracer", NULL);
	if (p && *p != '\0') {
		if (tracing_set_tracer(tr, p) < 0)
			pr_err("Failed to set given tracer: %s\n", p);
	}

	/* Since the tracer can free the snapshot buffer, allocate the snapshot here. */
	if (xbc_node_find_value(node, "alloc_snapshot", NULL)) {
		if (tracing_alloc_snapshot_instance(tr) < 0)
			pr_err("Failed to allocate snapshot buffer\n");
	}
}
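
/*
 * Illustrative tracer selection handled above (the tracer name is an
 * example only):
 *
 *	ftrace.tracer = function_graph
 *	ftrace.alloc_snapshot
 */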
static void __init
trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
{
	trace_boot_set_instance_options(tr, node);
	trace_boot_init_events(tr, node);
	trace_boot_enable_events(tr, node);
	trace_boot_enable_tracer(tr, node);
}

static void __init
trace_boot_init_instances(struct xbc_node *node)
{
	struct xbc_node *inode;
	struct trace_array *tr;
	const char *p;

	node = xbc_node_find_subkey(node, "instance");
	if (!node)
		return;

	xbc_node_for_each_subkey(node, inode) {
		p = xbc_node_get_data(inode);
		if (!p || *p == '\0')
			continue;

		tr = trace_array_get_by_name(p);
		if (!tr) {
			pr_err("Failed to get trace instance %s\n", p);
			continue;
		}
		trace_boot_init_one_instance(tr, inode);
		trace_array_put(tr);
	}
}
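
/*
 * Illustrative instance definition handled above (the instance name and
 * its contents are examples only):
 *
 *	ftrace.instance.foo {
 *		tracer = function
 *		event.sched.sched_wakeup.enable
 *	}
 */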
static int __init trace_boot_init(void)
{
	struct xbc_node *trace_node;
	struct trace_array *tr;

	trace_node = xbc_find_node("ftrace");
	if (!trace_node)
		return 0;

	tr = top_trace_array();
	if (!tr)
		return 0;

	/* Global trace array is also one instance */
	trace_boot_init_one_instance(tr, trace_node);
	trace_boot_init_instances(trace_node);

	disable_tracing_selftest("running boot-time tracing");

	return 0;
}
/*
 * Start tracing at the end of core-initcall, so that it starts tracing
 * from the beginning of postcore_initcall.
 */
core_initcall_sync(trace_boot_init);