xlated_dumper.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#define _GNU_SOURCE
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>

#include "disasm.h"
#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"

static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
{
	return ((struct kernel_sym *)sym_a)->address -
	       ((struct kernel_sym *)sym_b)->address;
}

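/*
 * Load /proc/kallsyms into dd->sym_mapping, sorted by address, so call
 * targets can later be resolved to symbol names. The table is left empty
 * if the file cannot be read or kptr_restrict hides the addresses.
 */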
void kernel_syms_load(struct dump_data *dd)
{
	struct kernel_sym *sym;
	char buff[256];
	void *tmp, *address;
	FILE *fp;

	fp = fopen("/proc/kallsyms", "r");
	if (!fp)
		return;

	while (fgets(buff, sizeof(buff), fp)) {
		tmp = libbpf_reallocarray(dd->sym_mapping, dd->sym_count + 1,
					  sizeof(*dd->sym_mapping));
		if (!tmp) {
out:
			free(dd->sym_mapping);
			dd->sym_mapping = NULL;
			fclose(fp);
			return;
		}
		dd->sym_mapping = tmp;
		sym = &dd->sym_mapping[dd->sym_count];
		if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
			continue;
		sym->address = (unsigned long)address;
		if (!strcmp(sym->name, "__bpf_call_base")) {
			dd->address_call_base = sym->address;
			/* sysctl kernel.kptr_restrict was set */
			if (!sym->address)
				goto out;
		}
		if (sym->address)
			dd->sym_count++;
	}

	fclose(fp);
	qsort(dd->sym_mapping, dd->sym_count,
	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
}

void kernel_syms_destroy(struct dump_data *dd)
{
	free(dd->sym_mapping);
}

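/* Binary-search the sorted symbol table for an exact address match. */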
struct kernel_sym *kernel_syms_search(struct dump_data *dd,
				      unsigned long key)
{
	struct kernel_sym sym = {
		.address = key,
	};

	return dd->sym_mapping ?
	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
}

static void __printf(2, 3) print_insn(void *private_data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

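/*
 * Print callback for DOT graph output: newlines become "\l\" so each
 * instruction row is left-aligned in the node label, and characters that
 * are special in DOT labels (<, >, |, &) are backslash-escaped.
 */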
static void __printf(2, 3)
print_insn_for_graph(void *private_data, const char *fmt, ...)
{
	char buf[64], *p;
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	p = buf;
	while (*p != '\0') {
		if (*p == '\n') {
			memmove(p + 3, p, strlen(buf) + 1 - (p - buf));
			/* Align each instruction dump row left. */
			*p++ = '\\';
			*p++ = 'l';
			/* Output multiline concatenation. */
			*p++ = '\\';
		} else if (*p == '<' || *p == '>' || *p == '|' || *p == '&') {
			memmove(p + 1, p, strlen(buf) + 1 - (p - buf));
			/* Escape special character. */
			*p++ = '\\';
		}
		p++;
	}

	printf("%s", buf);
}

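/*
 * Print callback for JSON output: strip the trailing newline from the
 * format string before handing it to the JSON writer, which quotes and
 * escapes the resulting string.
 */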
static void __printf(2, 3)
print_insn_json(void *private_data, const char *fmt, ...)
{
	unsigned int l = strlen(fmt);
	char chomped_fmt[l];
	va_list args;

	va_start(args, fmt);
	if (l > 0) {
		strncpy(chomped_fmt, fmt, l - 1);
		chomped_fmt[l - 1] = '\0';
	}
	jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
	va_end(args);
}

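/*
 * Format the target of a bpf-to-bpf (pcrel) call: the relative offset,
 * optionally followed by '#' and the resolved symbol name or address.
 */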
static const char *print_call_pcrel(struct dump_data *dd,
				    struct kernel_sym *sym,
				    unsigned long address,
				    const struct bpf_insn *insn)
{
	if (!dd->nr_jited_ksyms)
		/* Do not show address for interpreted programs */
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "%+d", insn->off);
	else if (sym)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "%+d#%s", insn->off, sym->name);
	else
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "%+d#0x%lx", insn->off, address);
	return dd->scratch_buff;
}

static const char *print_call_helper(struct dump_data *dd,
				     struct kernel_sym *sym,
				     unsigned long address)
{
	if (sym)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "%s", sym->name);
	else
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "0x%lx", address);
	return dd->scratch_buff;
}

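/*
 * Resolve a call instruction's target. BPF_PSEUDO_CALL marks a bpf-to-bpf
 * call, resolved through the jited ksyms table when available; anything
 * else is a helper call, resolved relative to __bpf_call_base.
 */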
static const char *print_call(void *private_data,
			      const struct bpf_insn *insn)
{
	struct dump_data *dd = private_data;
	unsigned long address = dd->address_call_base + insn->imm;
	struct kernel_sym *sym;

	if (insn->src_reg == BPF_PSEUDO_CALL &&
	    (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms)
		address = dd->jited_ksyms[insn->imm];

	sym = kernel_syms_search(dd, address);
	if (insn->src_reg == BPF_PSEUDO_CALL)
		return print_call_pcrel(dd, sym, address, insn);
	else
		return print_call_helper(dd, sym, address);
}

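/*
 * Format the 64-bit immediate of a BPF_LD_IMM64 instruction, recognizing
 * the pseudo source registers that encode map references and subprogram
 * addresses.
 */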
static const char *print_imm(void *private_data,
			     const struct bpf_insn *insn,
			     __u64 full_imm)
{
	struct dump_data *dd = private_data;

	if (insn->src_reg == BPF_PSEUDO_MAP_FD)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "map[id:%u]", insn->imm);
	else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
	else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "map[idx:%u]+%u", insn->imm, (insn + 1)->imm);
	else if (insn->src_reg == BPF_PSEUDO_FUNC)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "subprog[%+d]", insn->imm);
	else
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "0x%llx", (unsigned long long)full_imm);
	return dd->scratch_buff;
}

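/*
 * Dump translated instructions as a JSON array: one object per instruction
 * with its disassembly, optional function prototype and line info from BTF,
 * and optionally the raw opcode bytes.
 */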
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
		      bool opcodes, bool linum)
{
	const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
	const struct bpf_insn_cbs cbs = {
		.cb_print = print_insn_json,
		.cb_call = print_call,
		.cb_imm = print_imm,
		.private_data = dd,
	};
	struct bpf_func_info *record;
	struct bpf_insn *insn = buf;
	struct btf *btf = dd->btf;
	bool double_insn = false;
	unsigned int nr_skip = 0;
	char func_sig[1024];
	unsigned int i;

	jsonw_start_array(json_wtr);
	record = dd->func_info;
	for (i = 0; i < len / sizeof(*insn); i++) {
		if (double_insn) {
			double_insn = false;
			continue;
		}
		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);

		jsonw_start_object(json_wtr);

		if (btf && record) {
			if (record->insn_off == i) {
				btf_dumper_type_only(btf, record->type_id,
						     func_sig,
						     sizeof(func_sig));
				if (func_sig[0] != '\0') {
					jsonw_name(json_wtr, "proto");
					jsonw_string(json_wtr, func_sig);
				}
				record = (void *)record + dd->finfo_rec_size;
			}
		}

		if (prog_linfo) {
			const struct bpf_line_info *linfo;

			linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
			if (linfo) {
				btf_dump_linfo_json(btf, linfo, linum);
				nr_skip++;
			}
		}

		jsonw_name(json_wtr, "disasm");
		print_bpf_insn(&cbs, insn + i, true);

		if (opcodes) {
			jsonw_name(json_wtr, "opcodes");
			jsonw_start_object(json_wtr);

			jsonw_name(json_wtr, "code");
			jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);

			jsonw_name(json_wtr, "src_reg");
			jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);

			jsonw_name(json_wtr, "dst_reg");
			jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);

			jsonw_name(json_wtr, "off");
			print_hex_data_json((uint8_t *)(&insn[i].off), 2);

			jsonw_name(json_wtr, "imm");
			if (double_insn && i < len - 1)
				print_hex_data_json((uint8_t *)(&insn[i].imm),
						    12);
			else
				print_hex_data_json((uint8_t *)(&insn[i].imm),
						    4);
			jsonw_end_object(json_wtr);
		}
		jsonw_end_object(json_wtr);
	}
	jsonw_end_array(json_wtr);
}

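/*
 * Dump translated instructions as plain text, one numbered line per
 * instruction, with optional BTF function signatures, line info, and raw
 * opcode bytes.
 */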
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
		       bool opcodes, bool linum)
{
	const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
	const struct bpf_insn_cbs cbs = {
		.cb_print = print_insn,
		.cb_call = print_call,
		.cb_imm = print_imm,
		.private_data = dd,
	};
	struct bpf_func_info *record;
	struct bpf_insn *insn = buf;
	struct btf *btf = dd->btf;
	unsigned int nr_skip = 0;
	bool double_insn = false;
	char func_sig[1024];
	unsigned int i;

	record = dd->func_info;
	for (i = 0; i < len / sizeof(*insn); i++) {
		if (double_insn) {
			double_insn = false;
			continue;
		}

		if (btf && record) {
			if (record->insn_off == i) {
				btf_dumper_type_only(btf, record->type_id,
						     func_sig,
						     sizeof(func_sig));
				if (func_sig[0] != '\0')
					printf("%s:\n", func_sig);
				record = (void *)record + dd->finfo_rec_size;
			}
		}

		if (prog_linfo) {
			const struct bpf_line_info *linfo;

			linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
			if (linfo) {
				btf_dump_linfo_plain(btf, linfo, "; ",
						     linum);
				nr_skip++;
			}
		}

		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);

		printf("% 4d: ", i);
		print_bpf_insn(&cbs, insn + i, true);

		if (opcodes) {
			/* Indent the raw opcode bytes under the disassembly. */
			printf("       ");
			fprint_hex(stdout, insn + i, 8, " ");

			if (double_insn && i < len - 1) {
				printf(" ");
				fprint_hex(stdout, insn + i + 1, 8, " ");
			}

			printf("\n");
		}
	}
}

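/*
 * Dump one range of instructions for DOT graph output, numbering them from
 * start_idx and separating them with " | " so the whole range forms a
 * single node label.
 */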
void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
			   unsigned int start_idx)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print = print_insn_for_graph,
		.cb_call = print_call,
		.cb_imm = print_imm,
		.private_data = dd,
	};
	struct bpf_insn *insn_start = buf_start;
	struct bpf_insn *insn_end = buf_end;
	struct bpf_insn *cur = insn_start;
	bool double_insn = false;

	for (; cur <= insn_end; cur++) {
		if (double_insn) {
			double_insn = false;
			continue;
		}
		double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);

		printf("% 4d: ", (int)(cur - insn_start + start_idx));
		print_bpf_insn(&cbs, cur, true);
		if (cur != insn_end)
			printf(" | ");
	}
}
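
/*
 * A minimal sketch of the typical caller flow, for orientation only: the
 * bpf_prog_info retrieval and error handling are elided, and the local
 * variable names (insns, insns_len, opcodes, linum, ...) are illustrative;
 * the dd fields come from xlated_dumper.h.
 *
 *	struct dump_data dd = {};
 *
 *	kernel_syms_load(&dd);
 *	dd.nr_jited_ksyms = ...;	// from bpf_prog_info
 *	dd.jited_ksyms = ...;
 *	dd.btf = btf;
 *	dd.func_info = func_info;
 *	dd.finfo_rec_size = finfo_rec_size;
 *	dd.prog_linfo = prog_linfo;
 *
 *	if (json_output)
 *		dump_xlated_json(&dd, insns, insns_len, opcodes, linum);
 *	else
 *		dump_xlated_plain(&dd, insns, insns_len, opcodes, linum);
 *
 *	kernel_syms_destroy(&dd);
 */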