skel_internal.h

/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2021 Facebook */
#ifndef __SKEL_INTERNAL_H
#define __SKEL_INTERNAL_H

#ifdef __KERNEL__
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#else
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <errno.h>
#include "bpf.h"
#endif

#ifndef __NR_bpf
# if defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# endif
#endif
/* This file is a base header for auto-generated *.lskel.h files.
 * Its contents will change and may become part of auto-generation in the future.
 *
 * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent
 * and will change from one version of libbpf to another, depending on the
 * features requested during loader program generation.
 */
struct bpf_map_desc {
	/* output of the loader prog */
	int map_fd;
	/* input for the loader prog */
	__u32 max_entries;
	__aligned_u64 initial_value;
};

struct bpf_prog_desc {
	int prog_fd;
};

enum {
	BPF_SKEL_KERNEL = (1ULL << 0),
};

struct bpf_loader_ctx {
	__u32 sz;
	__u32 flags;
	__u32 log_level;
	__u32 log_size;
	__u64 log_buf;
};

struct bpf_load_and_run_opts {
	struct bpf_loader_ctx *ctx;
	const void *data;
	const void *insns;
	__u32 data_sz;
	__u32 insns_sz;
	const char *errstr;
};
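
/* Illustrative sketch (hypothetical helper, not part of this header's API):
 * a caller that wants verifier output can point the loader ctx at a log
 * buffer before handing the opts to bpf_load_and_run() below.
 */
static inline void skel_example_set_log(struct bpf_loader_ctx *ctx,
					char *buf, __u32 buf_sz)
{
	ctx->log_level = 1;	/* basic verifier log */
	ctx->log_size = buf_sz;
	ctx->log_buf = (__u64) (long) buf;
}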

long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);

static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			       unsigned int size)
{
#ifdef __KERNEL__
	return kern_sys_bpf(cmd, attr, size);
#else
	return syscall(__NR_bpf, cmd, attr, size);
#endif
}

#ifdef __KERNEL__
static inline int close(int fd)
{
	return close_fd(fd);
}

static inline void *skel_alloc(size_t size)
{
	struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->flags |= BPF_SKEL_KERNEL;
	return ctx;
}

static inline void skel_free(const void *p)
{
	kfree(p);
}
/* skel->bss/rodata maps are populated the following way:
 *
 * For kernel use:
 * skel_prep_map_data() allocates kernel memory that a kernel module can directly access.
 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
 * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
 * skel_finalize_map_data() sets skel->rodata to point to the actual value in a bpf map and
 * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
 * is not necessary.
 *
 * For user space:
 * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
 * The loader program will perform copy_from_user() from maps.rodata.initial_value.
 * skel_finalize_map_data() remaps the bpf array map value from the kernel memory into
 * the skel->rodata address.
 * (An illustrative sketch of this user space lifecycle follows the user space
 * variants of these helpers below.)
 *
 * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for
 * both kernel and user space. The generated loader program does
 * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
 * depending on bpf_loader_ctx->flags.
 */
static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	if (addr != ~0ULL)
		kvfree(p);
	/* When addr == ~0ULL the 'p' points to
	 * ((struct bpf_array *)map)->value. See skel_finalize_map_data.
	 */
}

static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	addr = kvmalloc(val_sz, GFP_KERNEL);
	if (!addr)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}

static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	struct bpf_map *map;
	void *addr = NULL;

	kvfree((void *) (long) *init_val);
	*init_val = ~0ULL;

	/* At this point bpf_load_and_run() finished without error and
	 * 'fd' is a valid bpf map FD. All sanity checks below should succeed.
	 */
	map = bpf_map_get(fd);
	if (IS_ERR(map))
		return NULL;
	if (map->map_type != BPF_MAP_TYPE_ARRAY)
		goto out;
	addr = ((struct bpf_array *)map)->value;
	/* the addr stays valid, since FD is not closed */
out:
	bpf_map_put(map);
	return addr;
}
#else
static inline void *skel_alloc(size_t size)
{
	return calloc(1, size);
}

static inline void skel_free(void *p)
{
	free(p);
}

static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	munmap(p, sz);
}

static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (addr == (void *) -1)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}

static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	void *addr;

	addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
	if (addr == (void *) -1)
		return NULL;
	return addr;
}
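
/* Illustrative sketch (hypothetical helper, not part of the generated API):
 * the user space rodata lifecycle described in the big comment above.
 * 'initial_value' and 'map_fd' stand in for the skel->maps.rodata fields
 * that a generated lskel manages around a successful bpf_load_and_run().
 */
static inline void *skel_example_rodata_cycle(const void *init_blob, size_t val_sz,
					      size_t mmap_sz, __u64 *initial_value,
					      int map_fd)
{
	/* stage the initial value where the loader prog can copy_from_user() it */
	void *rodata = skel_prep_map_data(init_blob, mmap_sz, val_sz);

	if (!rodata)
		return NULL;
	*initial_value = (__u64) (long) rodata;
	/* ... bpf_load_and_run() would execute the loader prog here ... */
	/* remap the bpf array map value over the staging area */
	return skel_finalize_map_data(initial_value, mmap_sz, PROT_READ, map_fd);
}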
#endif

static inline int skel_closenz(int fd)
{
	if (fd > 0)
		return close(fd);
	return -EINVAL;
}

#ifndef offsetofend
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
#endif
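
/* The helpers below size union bpf_attr with offsetofend(), e.g.
 * skel_map_create() passes offsetofend(union bpf_attr, map_extra), the
 * byte offset just past 'map_extra'. Zeroing and passing exactly the
 * attr prefix a command uses avoids handing the kernel uninitialized
 * trailing bytes.
 */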
static inline int skel_map_create(enum bpf_map_type map_type,
				  const char *map_name,
				  __u32 key_size,
				  __u32 value_size,
				  __u32 max_entries)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_type = map_type;
	strncpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz);
}
static inline int skel_map_update_elem(int fd, const void *key,
				       const void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = (long) key;
	attr.value = (long) value;
	attr.flags = flags;

	return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
}
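
/* Usage sketch (hypothetical helper and map name): create a one-element
 * array map and seed slot 0 with 'data', mirroring the first two steps of
 * bpf_load_and_run() below. Returns the map FD or a negative error.
 */
static inline int skel_example_seeded_map(const void *data, __u32 data_sz)
{
	int key = 0, map_fd, err;

	map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "example_map",
				 sizeof(key), data_sz, 1);
	if (map_fd < 0)
		return map_fd;
	err = skel_map_update_elem(map_fd, &key, data, 0);
	if (err < 0) {
		close(map_fd);
		return err;
	}
	return map_fd;
}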
static inline int skel_map_delete_elem(int fd, const void *key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = (long) key;

	return skel_sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
}

static inline int skel_map_get_fd_by_id(__u32 id)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_id = id;

	return skel_sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
}

static inline int skel_raw_tracepoint_open(const char *name, int prog_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.name = (long) name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
}
static inline int skel_link_create(int prog_fd, int target_fd,
				   enum bpf_attach_type attach_type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;

	return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
}
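
/* Attach sketch (hypothetical helper and tracepoint name): a generated
 * lskel attaches its programs through one of the two helpers above,
 * picking whichever path matches the program type.
 */
static inline int skel_example_attach(int prog_fd, int use_link)
{
	if (use_link)
		return skel_link_create(prog_fd, 0, BPF_TRACE_FENTRY);
	return skel_raw_tracepoint_open("sys_enter", prog_fd);
}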

#ifdef __KERNEL__
#define set_err
#else
#define set_err err = -errno
#endif
static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
	const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array);
	const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);
	int map_fd = -1, prog_fd = -1, key = 0, err;
	union bpf_attr attr;

	err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
	if (map_fd < 0) {
		opts->errstr = "failed to create loader map";
		set_err;
		goto out;
	}

	err = skel_map_update_elem(map_fd, &key, opts->data, 0);
	if (err < 0) {
		opts->errstr = "failed to update loader map";
		set_err;
		goto out;
	}

	memset(&attr, 0, prog_load_attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SYSCALL;
	attr.insns = (long) opts->insns;
	attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
	attr.license = (long) "Dual BSD/GPL";
	memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
	attr.fd_array = (long) &map_fd;
	attr.log_level = opts->ctx->log_level;
	attr.log_size = opts->ctx->log_size;
	attr.log_buf = opts->ctx->log_buf;
	attr.prog_flags = BPF_F_SLEEPABLE;
	err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
	if (prog_fd < 0) {
		opts->errstr = "failed to load loader prog";
		set_err;
		goto out;
	}

	memset(&attr, 0, test_run_attr_sz);
	attr.test.prog_fd = prog_fd;
	attr.test.ctx_in = (long) opts->ctx;
	attr.test.ctx_size_in = opts->ctx->sz;
	err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
	if (err < 0 || (int)attr.test.retval < 0) {
		opts->errstr = "failed to execute loader prog";
		if (err < 0) {
			set_err;
		} else {
			err = (int)attr.test.retval;
#ifndef __KERNEL__
			errno = -err;
#endif
		}
		goto out;
	}
	err = 0;
out:
	if (map_fd >= 0)
		close(map_fd);
	if (prog_fd >= 0)
		close(prog_fd);
	return err;
}
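
/* End-to-end sketch (hypothetical wrapper, names are placeholders): how a
 * generated lskel's load step drives bpf_load_and_run(). 'ctx' points at
 * the skeleton's embedded struct bpf_loader_ctx; 'data' and 'insns' are
 * the blobs emitted by "bpftool gen skeleton -L".
 */
static inline int skel_example_load(struct bpf_loader_ctx *ctx,
				    const void *data, __u32 data_sz,
				    const void *insns, __u32 insns_sz)
{
	struct bpf_load_and_run_opts opts = {
		.ctx = ctx,
		.data = data,
		.data_sz = data_sz,
		.insns = insns,
		.insns_sz = insns_sz,
	};

	/* On success the loader prog has written the map and prog FDs into
	 * the bpf_map_desc/bpf_prog_desc fields that follow ctx in the
	 * generated skeleton.
	 */
	return bpf_load_and_run(&opts);
}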
#endif