libbpf_probes.c

// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
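
/* Feature-probe helper: attempt to load a minimal program of the given
 * type and report whether the kernel accepted it. Returns 1 if the load
 * behaved as expected for a supported type (including the few types that
 * are expected to fail with a specific error and log message), 0 otherwise,
 * and -EOPNOTSUPP for types this probe doesn't know how to exercise.
 */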
static int probe_prog_load(enum bpf_prog_type prog_type,
			   const struct bpf_insn *insns, size_t insns_cnt,
			   char *log_buf, size_t log_buf_sz)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_buf_sz,
		.log_level = log_buf ? 1 : 0,
	);
	int fd, err, exp_err = 0;
	const char *exp_msg = NULL;
	char buf[4096];

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
		break;
	case BPF_PROG_TYPE_SK_LOOKUP:
		opts.expected_attach_type = BPF_SK_LOOKUP;
		break;
	case BPF_PROG_TYPE_KPROBE:
		opts.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		opts.expected_attach_type = BPF_LIRC_MODE2;
		break;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
		opts.log_buf = buf;
		opts.log_size = sizeof(buf);
		opts.log_level = 1;
		if (prog_type == BPF_PROG_TYPE_TRACING)
			opts.expected_attach_type = BPF_TRACE_FENTRY;
		else
			opts.expected_attach_type = BPF_MODIFY_RETURN;
		opts.attach_btf_id = 1;

		exp_err = -EINVAL;
		exp_msg = "attach_btf_id 1 is not a function";
		break;
	case BPF_PROG_TYPE_EXT:
		opts.log_buf = buf;
		opts.log_size = sizeof(buf);
		opts.log_level = 1;
		opts.attach_btf_id = 1;

		exp_err = -EINVAL;
		exp_msg = "Cannot replace kernel functions";
		break;
	case BPF_PROG_TYPE_SYSCALL:
		opts.prog_flags = BPF_F_SLEEPABLE;
		break;
	case BPF_PROG_TYPE_STRUCT_OPS:
		exp_err = -524; /* -ENOTSUPP */
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
		break;
	default:
		return -EOPNOTSUPP;
	}

	fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
	err = -errno;
	if (fd >= 0)
		close(fd);
	if (exp_err) {
		if (fd >= 0 || err != exp_err)
			return 0;
		if (exp_msg && !strstr(buf, exp_msg))
			return 0;
		return 1;
	}
	return fd >= 0 ? 1 : 0;
}
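
/* Probe whether the running kernel supports @prog_type, by loading a
 * trivial "return 0" program of that type. Returns 1 if supported, 0 if
 * not, or a negative error code if the probe itself failed. @opts is
 * reserved for future extension and must be NULL.
 */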
int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};
	const size_t insn_cnt = ARRAY_SIZE(insns);
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0);
	return libbpf_err(ret);
}
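
/* Assemble a raw BTF blob in memory (header, followed by the type
 * section, followed by the string section) and load it into the kernel.
 * Returns a BTF FD on success, negative error on failure; the caller
 * owns the returned FD.
 */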
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);

	free(raw_btf);
	return btf_fd;
}
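
/* Construct the minimal BTF needed to probe local storage map types:
 * an int, a struct bpf_spin_lock wrapping it, and a struct val value
 * type (laid out in the comment inside the function). Returns a BTF FD
 * or a negative error.
 */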
static int load_local_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */                      /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                                /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
}
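
/* Feature-probe helper for map types: pick the key/value sizes, flags,
 * and auxiliary objects (BTF, inner map) that the given map type needs,
 * then attempt the actual map creation. Returns 1 if the kernel supports
 * the map type, 0 if it doesn't, negative error if the probe failed.
 */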
static int probe_map_create(enum bpf_map_type map_type)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int key_size, value_size, max_entries;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err = 0;

	key_size = sizeof(__u32);
	value_size = sizeof(__u32);
	max_entries = 1;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size = sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size = sizeof(__u64);
		value_size = sizeof(__u64);
		opts.map_flags = BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size = sizeof(struct bpf_cgroup_storage_key);
		value_size = sizeof(__u64);
		max_entries = 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size = 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		opts.map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_local_storage_btf();
		if (btf_fd < 0)
			return btf_fd;
		break;
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
		key_size = 0;
		value_size = 0;
		max_entries = sysconf(_SC_PAGE_SIZE);
		break;
	case BPF_MAP_TYPE_STRUCT_OPS:
		/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
		opts.btf_vmlinux_value_type_id = 1;
		exp_err = -524; /* -ENOTSUPP */
		break;
	case BPF_MAP_TYPE_BLOOM_FILTER:
		key_size = 0;
		max_entries = 1;
		break;
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
		break;
	case BPF_MAP_TYPE_UNSPEC:
	default:
		return -EOPNOTSUPP;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					  sizeof(__u32), sizeof(__u32), 1, NULL);
		if (fd_inner < 0)
			goto cleanup;

		opts.inner_map_fd = fd_inner;
	}

	if (btf_fd >= 0) {
		opts.btf_fd = btf_fd;
		opts.btf_key_type_id = btf_key_type_id;
		opts.btf_value_type_id = btf_value_type_id;
	}

	fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
	err = -errno;

cleanup:
	if (fd >= 0)
		close(fd);
	if (fd_inner >= 0)
		close(fd_inner);
	if (btf_fd >= 0)
		close(btf_fd);

	if (exp_err)
		return fd < 0 && err == exp_err ? 1 : 0;
	else
		return fd >= 0 ? 1 : 0;
}
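
/* Probe whether the running kernel supports @map_type. Same return
 * convention as libbpf_probe_bpf_prog_type(); @opts must be NULL.
 */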
int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
{
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_map_create(map_type);
	return libbpf_err(ret);
}
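
/* Probe whether @helper_id is usable from programs of @prog_type by
 * loading a one-call program and inspecting the verifier log. Returns 1
 * if supported (or assumed supported), 0 if not, negative error if the
 * probe failed; @opts must be NULL.
 */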
int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
			    const void *opts)
{
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL((__u32)helper_id),
		BPF_EXIT_INSN(),
	};
	const size_t insn_cnt = ARRAY_SIZE(insns);
	char buf[4096];
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	/* we can't successfully load all prog types to check for BPF helper
	 * support, so bail out with -EOPNOTSUPP error
	 */
	switch (prog_type) {
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS:
		return -EOPNOTSUPP;
	default:
		break;
	}

	buf[0] = '\0';
	ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf));
	if (ret < 0)
		return libbpf_err(ret);

	/* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id)
	 * at all, it will emit something like "invalid func unknown#181".
	 * If BPF verifier recognizes BPF helper but it's not supported for
	 * given BPF program type, it will emit "unknown func bpf_sys_bpf#166".
	 * In both cases, provided combination of BPF program type and BPF
	 * helper is not supported by the kernel.
	 * In all other cases, probe_prog_load() above will either succeed (e.g.,
	 * because BPF helper happens to accept no input arguments or it
	 * accepts one input argument and initial PTR_TO_CTX is fine for
	 * that), or we'll get some more specific BPF verifier error about
	 * some unsatisfied conditions.
	 */
	if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ")))
		return 0;

	return 1; /* assume supported */
}
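
/* Hypothetical usage sketch (not part of this file): gate features at
 * startup on what the running kernel actually supports, e.g.
 *
 *	if (libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL) == 1 &&
 *	    libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL) == 1 &&
 *	    libbpf_probe_bpf_helper(BPF_PROG_TYPE_XDP,
 *				    BPF_FUNC_redirect_map, NULL) == 1)
 *		use_xdp_ringbuf_path();
 *	else
 *		use_fallback_path();
 *
 * use_xdp_ringbuf_path() and use_fallback_path() are placeholders for
 * application code.
 */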