bpf_jit.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include "bpf_jit.h"
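
/*
 * REG_TCC ($a6) carries the tail call count (TCC) into a BPF program.
 * $a6 is a caller-saved argument register, so when a program uses both
 * calls and tail calls the count is kept in $s5 (TCC_SAVED) across calls.
 */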
#define REG_TCC		LOONGARCH_GPR_A6
#define TCC_SAVED	LOONGARCH_GPR_S5

#define SAVE_RA		BIT(0)
#define SAVE_TCC	BIT(1)

static const int regmap[] = {
	/* return value from in-kernel function, and exit value for eBPF program */
	[BPF_REG_0] = LOONGARCH_GPR_A5,
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = LOONGARCH_GPR_A0,
	[BPF_REG_2] = LOONGARCH_GPR_A1,
	[BPF_REG_3] = LOONGARCH_GPR_A2,
	[BPF_REG_4] = LOONGARCH_GPR_A3,
	[BPF_REG_5] = LOONGARCH_GPR_A4,
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = LOONGARCH_GPR_S0,
	[BPF_REG_7] = LOONGARCH_GPR_S1,
	[BPF_REG_8] = LOONGARCH_GPR_S2,
	[BPF_REG_9] = LOONGARCH_GPR_S3,
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = LOONGARCH_GPR_S4,
	/* temporary register for blinding constants */
	[BPF_REG_AX] = LOONGARCH_GPR_T0,
};
static void mark_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_RA;
}

static void mark_tail_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_TCC;
}

static bool seen_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_RA);
}

static bool seen_tail_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_TCC);
}

static u8 tail_call_reg(struct jit_ctx *ctx)
{
	if (seen_call(ctx))
		return TCC_SAVED;

	return REG_TCC;
}
/*
 * eBPF prog stack layout:
 *
 *                                        high
 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
 *                            |           $ra           |
 *                            +-------------------------+
 *                            |           $fp           |
 *                            +-------------------------+
 *                            |           $s0           |
 *                            +-------------------------+
 *                            |           $s1           |
 *                            +-------------------------+
 *                            |           $s2           |
 *                            +-------------------------+
 *                            |           $s3           |
 *                            +-------------------------+
 *                            |           $s4           |
 *                            +-------------------------+
 *                            |           $s5           |
 *                            +-------------------------+ <--BPF_REG_FP
 *                            |  prog->aux->stack_depth |
 *                            |        (optional)       |
 * current $sp -------------> +-------------------------+
 *                                        low
 */
static void build_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);

	/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
	stack_adjust += sizeof(long) * 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in REG_TCC from the caller.
	 */
	emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);

	store_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);

	if (bpf_stack_adjust)
		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);

	/*
	 * Program contains calls and tail calls, so REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		move_reg(ctx, TCC_SAVED, REG_TCC);

	ctx->stack_size = stack_adjust;
}
static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
{
	int stack_adjust = ctx->stack_size;
	int load_offset;

	load_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);

	if (!is_tail_call) {
		/* Set return value */
		move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
		/* Return to the caller */
		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
	} else {
		/*
		 * Call the next bpf prog and skip the first instruction
		 * of TCC initialization.
		 */
		emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
	}
}

static void build_epilogue(struct jit_ctx *ctx)
{
	__build_epilogue(ctx, false);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}
/* initialized on the first pass of build_body() */
static int out_offset = -1;

static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	int off;
	u8 tcc = tail_call_reg(ctx);
	u8 a1 = LOONGARCH_GPR_A1;
	u8 a2 = LOONGARCH_GPR_A2;
	u8 t1 = LOONGARCH_GPR_T1;
	u8 t2 = LOONGARCH_GPR_T2;
	u8 t3 = LOONGARCH_GPR_T3;
	const int idx0 = ctx->idx;
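
	/*
	 * cur_offset and jmp_offset are instruction counts relative to the
	 * start of this tail call sequence; the forward jumps below all
	 * target the common "out:" point recorded in out_offset.
	 */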
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))

	/*
	 * a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	 goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_insn(ctx, ldwu, t1, a1, off);
	/* bgeu $a2, $t1, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
		goto toofar;

	/*
	 * if (--TCC < 0)
	 *	 goto out;
	 */
	emit_insn(ctx, addid, REG_TCC, tcc, -1);
	if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/*
	 * prog = array->ptrs[index];
	 * if (!prog)
	 *	 goto out;
	 */
	emit_insn(ctx, alsld, t2, a2, a1, 2);
	off = offsetof(struct bpf_array, ptrs);
	emit_insn(ctx, ldd, t2, t2, off);
	/* beq $t2, $zero, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_insn(ctx, ldd, t3, t2, off);
	__build_epilogue(ctx, true);
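
	/*
	 * The "out:" point below must land at the same instruction offset
	 * on every JIT pass; out_offset is recorded on the first pass and
	 * the check below catches any mismatch on later passes.
	 */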
	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}

	return 0;

toofar:
	pr_info_once("tail_call: jump too far\n");
	return -1;

#undef cur_offset
#undef jmp_offset
}
static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 t3 = LOONGARCH_GPR_T3;
	const u8 r0 = regmap[BPF_REG_0];
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool isdw = BPF_SIZE(insn->code) == BPF_DW;

	move_imm(ctx, t1, off, false);
	emit_insn(ctx, addd, t1, dst, t1);
	move_reg(ctx, t3, src);

	switch (imm) {
	/* lock *(size *)(dst + off) <op>= src */
	case BPF_ADD:
		if (isdw)
			emit_insn(ctx, amaddd, t2, t1, src);
		else
			emit_insn(ctx, amaddw, t2, t1, src);
		break;
	case BPF_AND:
		if (isdw)
			emit_insn(ctx, amandd, t2, t1, src);
		else
			emit_insn(ctx, amandw, t2, t1, src);
		break;
	case BPF_OR:
		if (isdw)
			emit_insn(ctx, amord, t2, t1, src);
		else
			emit_insn(ctx, amorw, t2, t1, src);
		break;
	case BPF_XOR:
		if (isdw)
			emit_insn(ctx, amxord, t2, t1, src);
		else
			emit_insn(ctx, amxorw, t2, t1, src);
		break;
	/* src = atomic_fetch_<op>(dst + off, src) */
	case BPF_ADD | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amaddd, src, t1, t3);
		} else {
			emit_insn(ctx, amaddw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_AND | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amandd, src, t1, t3);
		} else {
			emit_insn(ctx, amandw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_OR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amord, src, t1, t3);
		} else {
			emit_insn(ctx, amorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_XOR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amxord, src, t1, t3);
		} else {
			emit_insn(ctx, amxorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* src = atomic_xchg(dst + off, src); */
	case BPF_XCHG:
		if (isdw) {
			emit_insn(ctx, amswapd, src, t1, t3);
		} else {
			emit_insn(ctx, amswapw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
	case BPF_CMPXCHG:
		move_reg(ctx, t2, r0);
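		/*
		 * LL/SC loop: ll.[wd] loads the current value into r0, bne
		 * skips the store if it differs from the expected value in
		 * t2, sc.[wd] attempts the store and beq retries from the
		 * load on failure (branch offsets are in instructions).
		 */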
		if (isdw) {
			emit_insn(ctx, lld, r0, t1, 0);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scd, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
		} else {
			emit_insn(ctx, llw, r0, t1, 0);
			emit_zext_32(ctx, t2, true);
			emit_zext_32(ctx, r0, true);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scw, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
			emit_zext_32(ctx, r0, true);
		}
		break;
	}
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
{
	u8 tm = -1;
	u64 func_addr;
	bool func_addr_fixed;
	int i = insn - ctx->prog->insnsi;
	int ret, jmp_offset;
	const u8 code = insn->code;
	const u8 cond = BPF_OP(code);
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		move_reg(ctx, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		move_imm(ctx, dst, imm, is32);
		break;

	/* dst = dst + src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_insn(ctx, addd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst + imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_signed_imm12(imm)) {
			emit_insn(ctx, addid, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, addd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - src */
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_insn(ctx, subd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - imm */
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_signed_imm12(-imm)) {
			emit_insn(ctx, addid, dst, dst, -imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, subd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * src */
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_insn(ctx, muld, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * imm */
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, muld, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / src */
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / imm */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % src */
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % imm */
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_insn(ctx, and, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & imm */
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, andi, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, and, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | src */
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_insn(ctx, or, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, ori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, or, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ src */
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_insn(ctx, xor, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ imm */
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, xori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, xor, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst << src (logical) */
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_insn(ctx, sllw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_insn(ctx, slld, dst, dst, src);
		break;

	/* dst = dst << imm (logical) */
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_insn(ctx, slliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_insn(ctx, sllid, dst, dst, imm);
		break;

	/* dst = dst >> src (logical) */
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_insn(ctx, srlw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_insn(ctx, srld, dst, dst, src);
		break;

	/* dst = dst >> imm (logical) */
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_insn(ctx, srliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_insn(ctx, srlid, dst, dst, imm);
		break;

	/* dst = dst >> src (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_insn(ctx, sraw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_insn(ctx, srad, dst, dst, src);
		break;

	/* dst = dst >> imm (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraiw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraid, dst, dst, imm);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			/* do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		switch (imm) {
		case 16:
			emit_insn(ctx, revb2h, dst, dst);
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			emit_insn(ctx, revb2w, dst, dst);
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			emit_insn(ctx, revbd, dst, dst);
			break;
		}
		break;

	/* PC += off if dst cond src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_reg(ctx, t1, dst);
		move_reg(ctx, t2, src);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, t1, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst cond imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (imm) {
			move_imm(ctx, t1, imm, false);
			tm = t1;
		} else {
			/* If imm is 0, simply use zero register. */
			tm = LOONGARCH_GPR_ZERO;
		}
		move_reg(ctx, t2, dst);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, tm, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, tm, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & src */
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		emit_insn(ctx, and, t1, dst, src);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, and, t1, dst, t1);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		move_addr(ctx, t1, func_addr);
		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
		break;

	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		mark_tail_call(ctx);
		if (emit_bpf_tail_call(ctx) < 0)
			return -EINVAL;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
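		/*
		 * The epilogue is emitted right after the program body, so
		 * an exit as the very last instruction simply falls through
		 * to it; otherwise jump to the epilogue.
		 */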
		if (i == ctx->prog->len - 1)
			break;

		jmp_offset = epilogue_offset(ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
		move_imm(ctx, dst, imm64, is32);
		return 1;

	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldbu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxbu, dst, src, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldhu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxhu, dst, src, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldwu, dst, src, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, ldptrw, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxwu, dst, src, t1);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, off, is32);
			emit_insn(ctx, ldxd, dst, src, t1);
			break;
		}
		break;

	/* *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxb, t1, dst, t2);
			}
			break;
		case BPF_H:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxh, t1, dst, t2);
			}
			break;
		case BPF_W:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxw, t1, dst, t2);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxd, t1, dst, t2);
			}
			break;
		}
		break;

	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxb, src, dst, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxh, src, dst, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxw, src, dst, t1);
			}
			break;
		case BPF_DW:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxd, src, dst, t1);
			}
			break;
		}
		break;

	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(insn, ctx);
		break;

	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

	default:
		pr_err("bpf_jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;

toofar:
	pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
	return -E2BIG;
}
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	int i;
	const struct bpf_prog *prog = ctx->prog;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx, extra_pass);
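		/*
		 * build_insn() returns 1 for instructions that occupy two
		 * BPF slots (ld_imm64); record the second slot and skip it.
		 */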
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}

	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}
/* Fill space with break instructions */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = INSN_BREAK;
}
static int validate_code(struct jit_ctx *ctx)
{
	int i;
	union loongarch_instruction insn;
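
	/*
	 * The image was pre-filled with break instructions by jit_fill_hole(),
	 * so any INSN_BREAK left over means a pass emitted fewer instructions
	 * than the space that was reserved.
	 */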
	for (i = 0; i < ctx->idx; i++) {
		insn = ctx->image[i];
		/* Check INSN_BREAK */
		if (insn.word == INSN_BREAK)
			return -1;
	}

	return 0;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	bool tmp_blinded = false, extra_pass = false;
	u8 *image_ptr;
	int image_size;
	struct jit_ctx ctx;
	struct jit_data *jit_data;
	struct bpf_binary_header *header;
	struct bpf_prog *tmp, *orig_prog = prog;

	/*
	 * If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	if (IS_ERR(tmp))
		return orig_prog;

	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
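
	/*
	 * If a previous pass of a multi-function program already saved its
	 * context, reuse it and only run the extra pass that patches in the
	 * final call addresses.
	 */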
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		image_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_offset;
	}
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);
	/*
	 * Now we know the actual image size.
	 * Since each LoongArch instruction is 32 bits long, we translate
	 * the number of JITed instructions into the size required to
	 * store the JITed code.
	 */
	image_size = sizeof(u32) * ctx.idx;
	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 2. Now, the actual pass to generate final JIT code */
	ctx.image = (union loongarch_instruction *)image_ptr;

skip_init_ctx:
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}
	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}

	/* And we're done */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	/* Update the icache */
	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			prog->jited_len = 0;
			goto out_offset;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}

	prog->jited = 1;
	prog->jited_len = image_size;
	prog->bpf_func = (void *)ctx.image;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= LOONGARCH_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);

out_offset:
		kvfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}

out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);

	out_offset = -1;

	return prog;
}