gen_loader.c

// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>

#define MAX_USED_MAPS	64
#define MAX_USED_PROGS	32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
 * R7 contains the result of the last sys_bpf command (typically error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that loader prog will use for read/write.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 inner_map_fd;
	__u32 prog_fd[MAX_USED_PROGS];
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
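
/* Note: stack_off() produces a negative offset from R10 (the loader prog's
 * frame pointer), e.g. stack_off(btf_fd) == -sizeof(struct loader_stack), so
 * all loader_stack fields live at the bottom of the stack frame.
 * attr_field() takes the blob offset of a union bpf_attr image (as returned
 * by add_data() below) and yields the blob offset of one of its fields,
 * e.g. attr_field(btf_load_attr, btf_size).
 */
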
static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}

static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);

void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
	int i;

	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* amount of stack actually used, only used to calculate iterations, not stack offset */
	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below (including map fd cleanup) */
			      (nr_progs_sz / 4) * 3 + 2 +
			      /* 6 insns for emit_sys_close_blob,
			       * 6 insns for debug_regs in emit_sys_close_blob
			       */
			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < nr_progs_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	for (i = 0; i < nr_maps; i++)
		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
}
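
/* At this point the generated image consists of the prologue (save ctx in R6,
 * zero the loader stack) followed immediately by the cleanup epilogue above.
 * Normal execution jumps over the epilogue; emit_check_err() below branches
 * back to gen->cleanup_label whenever a sys_bpf command fails, closing every
 * prog/map/btf FD recorded so far and exiting with the error from R7.
 */
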
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}
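
/* add_data() returns the 8-byte-aligned offset of the copied bytes within the
 * data blob. The emit_* helpers turn such an offset into a runtime pointer
 * with a BPF_PSEUDO_MAP_IDX_VALUE ld_imm64 (map index 0 in the loader's
 * fd_array, imm = offset), which the kernel relocates to blob + offset when
 * the loader program is loaded. A typical pattern, used throughout this file
 * (cf. bpf_gen__map_create() below; attr_off here is just an illustrative
 * name), is:
 *
 *	attr_off = add_data(gen, &attr, attr_size);
 *	emit_sys_bpf(gen, BPF_MAP_CREATE, attr_off, attr_size);
 */
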
/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
 * to start of fd_array. Caller can decide if it is usable or not.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
			  bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If value in ctx is zero don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}
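
/* The sequence emitted above corresponds to roughly this pseudo-C inside the
 * loader program:
 *
 *	R7 = bpf_sys_bpf(cmd, (union bpf_attr *)(blob + attr), attr_size);
 *
 * The result stays in R7 so that the debug_ret()/emit_check_err() pair that
 * callers usually emit next can log it and branch to cleanup on failure.
 */
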
static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains result of last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}
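
/* off is the backward distance, in instructions (8 bytes each), from the next
 * insn to the cleanup label recorded in bpf_gen__init(); the extra -1 accounts
 * for jump offsets being relative to the following instruction. If the
 * distance no longer fits the 16-bit jump offset, generation is aborted with
 * -ERANGE instead of emitting a broken jump.
 */
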
/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* The special case to accommodate common debug_ret():
		 * to avoid specifying BPF_REG_7 and adding " r=%%d" to
		 * prints explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns
			       * * 6 is additional insns in debug_regs
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
	int i;

	if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
		pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
			nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
		gen->error = -EFAULT;
		return gen->error;
	}
	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %d\n", gen->error);
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;
	}
	return gen->error;
}
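
/* On success the caller's struct gen_loader_opts receives the finished loader
 * program and its data blob. Roughly, a light-skeleton consumer then does
 * something like the following (a sketch only; the real implementation is
 * bpf_load_and_run() in skel_internal.h):
 *
 *	put opts->data into a single array map, expose that map via fd_array[0],
 *	load opts->insns as a BPF_PROG_TYPE_SYSCALL program, and run it once
 *	with struct bpf_loader_ctx as its context to create all maps and progs.
 */
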
void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: load_btf: size %d\n", btf_raw_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = btf_raw_size;
	btf_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

void bpf_gen__map_create(struct bpf_gen *gen,
			 enum bpf_map_type map_type,
			 const char *map_name,
			 __u32 key_size, __u32 value_size, __u32 max_entries,
			 struct bpf_map_create_opts *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_extra);
	bool close_inner_map_fd = false;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.map_flags = map_attr->map_flags;
	attr.map_extra = map_attr->map_extra;
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.numa_node = map_attr->numa_node;
	attr.map_ifindex = map_attr->map_ifindex;
	attr.max_entries = max_entries;
	attr.btf_key_type_id = map_attr->btf_key_type_id;
	attr.btf_value_type_id = map_attr->btf_value_type_id;

	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
		 attr.map_name, map_idx, map_type, attr.btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	if (attr.btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (attr.map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  attr.map_name, map_idx, map_type, value_size,
		  attr.btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;
	int i;

	for (i = 0; i < gen->nr_ksyms; i++) {
		if (!strcmp(gen->ksyms[i].name, relo->name)) {
			gen->ksyms[i].ref++;
			return &gen->ksyms[i];
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
 * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
 * this would mean a new BTF fd index for each entry. By pairing symbol name
 * with index, we get the insn->imm, insn->off pairing that kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them may
 * share same index in fd_array (such that kfunc_btf_tab has 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set value for imm, off as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* obtain fd in BPF_REG_9 */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	/* jump to fd_array store if fd denotes module BTF */
	emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
	/* set the default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip BTF fd store for vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* store BTF fd in slot */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
	return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
	return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}
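
/* src_reg and dst_reg are 4-bit bitfields sharing the byte right after
 * insn->code, so which nibble must be preserved depends on the bitfield
 * endianness. ANDing that byte with this mask keeps dst_reg and clears
 * src_reg, i.e. it drops BPF_PSEUDO_BTF_ID from a ld_imm64 insn so the
 * verifier treats an unresolved weak ksym as a plain 0 constant.
 */
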
/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
		 * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
		 */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	/* skip src_reg adjustment */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask();
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

void bpf_gen__record_relo_core(struct bpf_gen *gen,
			       const struct bpf_core_relo *core_relo)
{
	struct bpf_core_relo *relos;

	relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
	if (!relos) {
		gen->error = -ENOMEM;
		return;
	}
	gen->core_relos = relos;
	relos += gen->core_relo_cnt;
	memcpy(relos, core_relo, sizeof(*relos));
	gen->core_relo_cnt++;
}

static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	switch (relo->kind) {
	case BTF_KIND_VAR:
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
		break;
	case BTF_KIND_FUNC:
		emit_relo_kfunc_btf(gen, relo, insn);
		break;
	default:
		pr_warn("Unknown relocation kind '%d'\n", relo->kind);
		gen->error = -EDOM;
		return;
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_core_relo(struct bpf_gen *gen)
{
	if (!gen->core_relo_cnt)
		return;
	free(gen->core_relos);
	gen->core_relo_cnt = 0;
	gen->core_relos = NULL;
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	int i, insn;

	for (i = 0; i < gen->nr_ksyms; i++) {
		/* only close fds for typed ksyms and kfuncs */
		if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = gen->ksyms[i].insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
			if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
	cleanup_core_relo(gen);
}

void bpf_gen__prog_load(struct bpf_gen *gen,
			enum bpf_prog_type prog_type, const char *prog_name,
			const char *license, struct bpf_insn *insns, size_t insn_cnt,
			struct bpf_prog_load_opts *load_attr, int prog_idx)
{
	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n",
		 prog_type, insn_cnt, prog_idx);
	/* add license string to blob of bytes */
	license_off = add_data(gen, license, strlen(license) + 1);
	/* add insns to blob of bytes */
	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));

	attr.prog_type = prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.attach_btf_id = load_attr->attach_btf_id;
	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = 0;
	attr.insn_cnt = (__u32)insn_cnt;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	func_info = add_data(gen, load_attr->func_info,
			     attr.func_info_cnt * attr.func_info_rec_size);

	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	line_info = add_data(gen, load_attr->line_info,
			     attr.line_info_cnt * attr.line_info_rec_size);

	attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
	attr.core_relo_cnt = gen->core_relo_cnt;
	core_relos = add_data(gen, gen->core_relos,
			      attr.core_relo_cnt * attr.core_relo_rec_size);

	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	prog_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);
	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);
	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);
	/* populate union bpf_attr with a pointer to core_relos */
	emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);
	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);
	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns_off);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns_off);
	if (gen->attach_kind) {
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
		gen->attach_kind = 0;
	}
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}
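
/* Note the ordering above: module BTF FDs taken for extern ksyms and the
 * attach_btf_obj_fd are closed before emit_check_err(), so they are released
 * whether or not BPF_PROG_LOAD succeeded; only afterwards is the new prog FD
 * stashed on the loader stack for bpf_gen__finish() to copy into the ctx.
 */
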
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_update_elem: idx %d\n", map_idx);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value) {
	 *    if (ctx->flags & BPF_SKEL_KERNEL)
	 *        bpf_probe_read_kernel(value, value_size, initial_value);
	 *    else
	 *        bpf_copy_from_user(value, value_size, initial_value);
	 * }
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
			      offsetof(struct bpf_loader_ctx, flags)));
	emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
				 int inner_map_idx)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, key;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
		 outer_map_idx, slot, inner_map_idx);

	key = add_data(gen, &slot, sizeof(slot));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, outer_map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value),
		       blob_fd_array_off(gen, inner_map_idx));

	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
		  outer_map_idx, slot, inner_map_idx);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}