bpf_jit_comp32.c

// SPDX-License-Identifier: GPL-2.0
/*
 * BPF JIT compiler for RV32G
 *
 * Copyright (c) 2020 Luke Nelson <[email protected]>
 * Copyright (c) 2020 Xi Wang <[email protected]>
 *
 * The code is based on the BPF JIT compiler for RV64G by Björn Töpel and
 * the BPF JIT compiler for 32-bit ARM by Shubham Bansal and Mircea Gherzan.
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include "bpf_jit.h"

/*
 * Stack layout during BPF program execution:
 *
 *                     high
 *     RV32 fp =>  +----------+
 *                 | saved ra |
 *                 | saved fp | RV32 callee-saved registers
 *                 |   ...    |
 *                 +----------+ <= (fp - 4 * NR_SAVED_REGISTERS)
 *                 |  hi(R6)  |
 *                 |  lo(R6)  |
 *                 |  hi(R7)  | JIT scratch space for BPF registers
 *                 |  lo(R7)  |
 *                 |   ...    |
 *  BPF_REG_FP =>  +----------+ <= (fp - 4 * NR_SAVED_REGISTERS
 *                 |          |       - 4 * BPF_JIT_SCRATCH_REGS)
 *                 |          |
 *                 |   ...    | BPF program stack
 *                 |          |
 *     RV32 sp =>  +----------+
 *                 |          |
 *                 |   ...    | Function call stack
 *                 |          |
 *                 +----------+
 *                     low
 */

enum {
        /* Stack layout - these are offsets from top of JIT scratch space. */
        BPF_R6_HI,
        BPF_R6_LO,
        BPF_R7_HI,
        BPF_R7_LO,
        BPF_R8_HI,
        BPF_R8_LO,
        BPF_R9_HI,
        BPF_R9_LO,
        BPF_AX_HI,
        BPF_AX_LO,

        /* Stack space for BPF_REG_6 through BPF_REG_9 and BPF_REG_AX. */
        BPF_JIT_SCRATCH_REGS,
};

/* Number of callee-saved registers stored to stack: ra, fp, s1--s7. */
#define NR_SAVED_REGISTERS      9

/* Offset from fp for BPF registers stored on stack. */
#define STACK_OFFSET(k) (-4 - (4 * NR_SAVED_REGISTERS) - (4 * (k)))
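/*
 * For example, STACK_OFFSET(BPF_R6_HI) is -4 - 36 - 0 = -40, so hi(R6)
 * lives at fp - 40 and lo(R6) at fp - 44, matching the layout above.
 */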

#define TMP_REG_1       (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2       (MAX_BPF_JIT_REG + 1)

#define RV_REG_TCC              RV_REG_T6
#define RV_REG_TCC_SAVED        RV_REG_S7

static const s8 bpf2rv32[][2] = {
        /* Return value from in-kernel function, and exit value from eBPF. */
        [BPF_REG_0] = {RV_REG_S2, RV_REG_S1},
        /* Arguments from eBPF program to in-kernel function. */
        [BPF_REG_1] = {RV_REG_A1, RV_REG_A0},
        [BPF_REG_2] = {RV_REG_A3, RV_REG_A2},
        [BPF_REG_3] = {RV_REG_A5, RV_REG_A4},
        [BPF_REG_4] = {RV_REG_A7, RV_REG_A6},
        [BPF_REG_5] = {RV_REG_S4, RV_REG_S3},
        /*
         * Callee-saved registers that in-kernel function will preserve.
         * Stored on the stack.
         */
        [BPF_REG_6] = {STACK_OFFSET(BPF_R6_HI), STACK_OFFSET(BPF_R6_LO)},
        [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
        [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
        [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
        /* Read-only frame pointer to access BPF stack. */
        [BPF_REG_FP] = {RV_REG_S6, RV_REG_S5},
        /* Temporary register for blinding constants. Stored on the stack. */
        [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
        /*
         * Temporary registers used by the JIT to operate on registers stored
         * on the stack. Save t0 and t1 to be used as temporaries in generated
         * code.
         */
        [TMP_REG_1] = {RV_REG_T3, RV_REG_T2},
        [TMP_REG_2] = {RV_REG_T5, RV_REG_T4},
};

static s8 hi(const s8 *r)
{
        return r[0];
}

static s8 lo(const s8 *r)
{
        return r[1];
}
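
/*
 * Load a 32-bit immediate into rd using a lui/addi pair. The (1 << 11)
 * rounding compensates for addi sign-extending its 12-bit immediate:
 * e.g. for imm = 0x12345fff this emits lui rd, 0x12346 followed by
 * addi rd, rd, -1.
 */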
static void emit_imm(const s8 rd, s32 imm, struct rv_jit_context *ctx)
{
        u32 upper = (imm + (1 << 11)) >> 12;
        u32 lower = imm & 0xfff;

        if (upper) {
                emit(rv_lui(rd, upper), ctx);
                emit(rv_addi(rd, rd, lower), ctx);
        } else {
                emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
        }
}

static void emit_imm32(const s8 *rd, s32 imm, struct rv_jit_context *ctx)
{
        /* Emit immediate into lower bits. */
        emit_imm(lo(rd), imm, ctx);

        /* Sign-extend into upper bits. */
        if (imm >= 0)
                emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
        else
                emit(rv_addi(hi(rd), RV_REG_ZERO, -1), ctx);
}

static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo,
                       struct rv_jit_context *ctx)
{
        emit_imm(lo(rd), imm_lo, ctx);
        emit_imm(hi(rd), imm_hi, ctx);
}

static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
        int stack_adjust = ctx->stack_size;
        const s8 *r0 = bpf2rv32[BPF_REG_0];

        /* Set return value if not tail call. */
        if (!is_tail_call) {
                emit(rv_addi(RV_REG_A0, lo(r0), 0), ctx);
                emit(rv_addi(RV_REG_A1, hi(r0), 0), ctx);
        }

        /* Restore callee-saved registers. */
        emit(rv_lw(RV_REG_RA, stack_adjust - 4, RV_REG_SP), ctx);
        emit(rv_lw(RV_REG_FP, stack_adjust - 8, RV_REG_SP), ctx);
        emit(rv_lw(RV_REG_S1, stack_adjust - 12, RV_REG_SP), ctx);
        emit(rv_lw(RV_REG_S2, stack_adjust - 16, RV_REG_SP), ctx);
        emit(rv_lw(RV_REG_S3, stack_adjust - 20, RV_REG_SP), ctx);
        emit(rv_lw(RV_REG_S4, stack_adjust - 24, RV_REG_SP), ctx);
        emit(rv_lw(RV_REG_S5, stack_adjust - 28, RV_REG_SP), ctx);
        emit(rv_lw(RV_REG_S6, stack_adjust - 32, RV_REG_SP), ctx);
        emit(rv_lw(RV_REG_S7, stack_adjust - 36, RV_REG_SP), ctx);

        emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);

        if (is_tail_call) {
                /*
                 * goto *(t0 + 4);
                 * Skips first instruction of prologue which initializes tail
                 * call counter. Assumes t0 contains address of target program,
                 * see emit_bpf_tail_call.
                 */
                emit(rv_jalr(RV_REG_ZERO, RV_REG_T0, 4), ctx);
        } else {
                emit(rv_jalr(RV_REG_ZERO, RV_REG_RA, 0), ctx);
        }
}
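
/*
 * Entries in bpf2rv32 are either non-negative RV32 register numbers or
 * negative fp-relative offsets from STACK_OFFSET(), so a negative value
 * means that half of the BPF register is spilled to the stack.
 */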
static bool is_stacked(s8 reg)
{
        return reg < 0;
}

static const s8 *bpf_get_reg64(const s8 *reg, const s8 *tmp,
                               struct rv_jit_context *ctx)
{
        if (is_stacked(hi(reg))) {
                emit(rv_lw(hi(tmp), hi(reg), RV_REG_FP), ctx);
                emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx);
                reg = tmp;
        }
        return reg;
}

static void bpf_put_reg64(const s8 *reg, const s8 *src,
                          struct rv_jit_context *ctx)
{
        if (is_stacked(hi(reg))) {
                emit(rv_sw(RV_REG_FP, hi(reg), hi(src)), ctx);
                emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx);
        }
}

static const s8 *bpf_get_reg32(const s8 *reg, const s8 *tmp,
                               struct rv_jit_context *ctx)
{
        if (is_stacked(lo(reg))) {
                emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx);
                reg = tmp;
        }
        return reg;
}

static void bpf_put_reg32(const s8 *reg, const s8 *src,
                          struct rv_jit_context *ctx)
{
        if (is_stacked(lo(reg))) {
                emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx);
                if (!ctx->prog->aux->verifier_zext)
                        emit(rv_sw(RV_REG_FP, hi(reg), RV_REG_ZERO), ctx);
        } else if (!ctx->prog->aux->verifier_zext) {
                emit(rv_addi(hi(reg), RV_REG_ZERO, 0), ctx);
        }
}
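
/*
 * Emit a single jal when the byte offset fits in 21 bits (jal encodes it
 * in 16-bit units, hence the >> 1), otherwise an auipc/jalr pair. The
 * auipc upper part uses the same (1 << 11) rounding as emit_imm() to
 * compensate for the sign-extended jalr immediate.
 */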
static void emit_jump_and_link(u8 rd, s32 rvoff, bool force_jalr,
                               struct rv_jit_context *ctx)
{
        s32 upper, lower;

        if (rvoff && is_21b_int(rvoff) && !force_jalr) {
                emit(rv_jal(rd, rvoff >> 1), ctx);
                return;
        }

        upper = (rvoff + (1 << 11)) >> 12;
        lower = rvoff & 0xfff;
        emit(rv_auipc(RV_REG_T1, upper), ctx);
        emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
}

static void emit_alu_i64(const s8 *dst, s32 imm,
                         struct rv_jit_context *ctx, const u8 op)
{
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);

        switch (op) {
        case BPF_MOV:
                emit_imm32(rd, imm, ctx);
                break;
        case BPF_AND:
                if (is_12b_int(imm)) {
                        emit(rv_andi(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                if (imm >= 0)
                        emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                break;
        case BPF_OR:
                if (is_12b_int(imm)) {
                        emit(rv_ori(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                if (imm < 0)
                        emit(rv_ori(hi(rd), RV_REG_ZERO, -1), ctx);
                break;
        case BPF_XOR:
                if (is_12b_int(imm)) {
                        emit(rv_xori(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                if (imm < 0)
                        emit(rv_xori(hi(rd), hi(rd), -1), ctx);
                break;
        case BPF_LSH:
                if (imm >= 32) {
                        emit(rv_slli(hi(rd), lo(rd), imm - 32), ctx);
                        emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx);
                } else if (imm == 0) {
                        /* Do nothing. */
                } else {
                        emit(rv_srli(RV_REG_T0, lo(rd), 32 - imm), ctx);
                        emit(rv_slli(hi(rd), hi(rd), imm), ctx);
                        emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
                        emit(rv_slli(lo(rd), lo(rd), imm), ctx);
                }
                break;
        case BPF_RSH:
                if (imm >= 32) {
                        emit(rv_srli(lo(rd), hi(rd), imm - 32), ctx);
                        emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                } else if (imm == 0) {
                        /* Do nothing. */
                } else {
                        emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx);
                        emit(rv_srli(lo(rd), lo(rd), imm), ctx);
                        emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
                        emit(rv_srli(hi(rd), hi(rd), imm), ctx);
                }
                break;
        case BPF_ARSH:
                if (imm >= 32) {
                        emit(rv_srai(lo(rd), hi(rd), imm - 32), ctx);
                        emit(rv_srai(hi(rd), hi(rd), 31), ctx);
                } else if (imm == 0) {
                        /* Do nothing. */
                } else {
                        emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx);
                        emit(rv_srli(lo(rd), lo(rd), imm), ctx);
                        emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
                        emit(rv_srai(hi(rd), hi(rd), imm), ctx);
                }
                break;
        }

        bpf_put_reg64(dst, rd, ctx);
}

static void emit_alu_i32(const s8 *dst, s32 imm,
                         struct rv_jit_context *ctx, const u8 op)
{
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *rd = bpf_get_reg32(dst, tmp1, ctx);

        switch (op) {
        case BPF_MOV:
                emit_imm(lo(rd), imm, ctx);
                break;
        case BPF_ADD:
                if (is_12b_int(imm)) {
                        emit(rv_addi(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_add(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                break;
        case BPF_SUB:
                if (is_12b_int(-imm)) {
                        emit(rv_addi(lo(rd), lo(rd), -imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_sub(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                break;
        case BPF_AND:
                if (is_12b_int(imm)) {
                        emit(rv_andi(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                break;
        case BPF_OR:
                if (is_12b_int(imm)) {
                        emit(rv_ori(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                break;
        case BPF_XOR:
                if (is_12b_int(imm)) {
                        emit(rv_xori(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                break;
        case BPF_LSH:
                if (is_12b_int(imm)) {
                        emit(rv_slli(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_sll(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                break;
        case BPF_RSH:
                if (is_12b_int(imm)) {
                        emit(rv_srli(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_srl(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                break;
        case BPF_ARSH:
                if (is_12b_int(imm)) {
                        emit(rv_srai(lo(rd), lo(rd), imm), ctx);
                } else {
                        emit_imm(RV_REG_T0, imm, ctx);
                        emit(rv_sra(lo(rd), lo(rd), RV_REG_T0), ctx);
                }
                break;
        }

        bpf_put_reg32(dst, rd, ctx);
}

static void emit_alu_r64(const s8 *dst, const s8 *src,
                         struct rv_jit_context *ctx, const u8 op)
{
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *tmp2 = bpf2rv32[TMP_REG_2];
        const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
        const s8 *rs = bpf_get_reg64(src, tmp2, ctx);

        switch (op) {
        case BPF_MOV:
                emit(rv_addi(lo(rd), lo(rs), 0), ctx);
                emit(rv_addi(hi(rd), hi(rs), 0), ctx);
                break;
        case BPF_ADD:
                if (rd == rs) {
                        emit(rv_srli(RV_REG_T0, lo(rd), 31), ctx);
                        emit(rv_slli(hi(rd), hi(rd), 1), ctx);
                        emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
                        emit(rv_slli(lo(rd), lo(rd), 1), ctx);
                } else {
                        emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx);
                        emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx);
                        emit(rv_add(hi(rd), hi(rd), hi(rs)), ctx);
                        emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx);
                }
                break;
        case BPF_SUB:
                emit(rv_sub(RV_REG_T1, hi(rd), hi(rs)), ctx);
                emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx);
                emit(rv_sub(hi(rd), RV_REG_T1, RV_REG_T0), ctx);
                emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_AND:
                emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx);
                emit(rv_and(hi(rd), hi(rd), hi(rs)), ctx);
                break;
        case BPF_OR:
                emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx);
                emit(rv_or(hi(rd), hi(rd), hi(rs)), ctx);
                break;
        case BPF_XOR:
                emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx);
                emit(rv_xor(hi(rd), hi(rd), hi(rs)), ctx);
                break;
        case BPF_MUL:
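                /*
                 * 64-bit multiply from 32-bit pieces: lo(rd) * lo(rs)
                 * provides the low word and, via mulhu, its carry into the
                 * high word, which also accumulates hi(rs) * lo(rd) and
                 * hi(rd) * lo(rs).
                 */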
                emit(rv_mul(RV_REG_T0, hi(rs), lo(rd)), ctx);
                emit(rv_mul(hi(rd), hi(rd), lo(rs)), ctx);
                emit(rv_mulhu(RV_REG_T1, lo(rd), lo(rs)), ctx);
                emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx);
                emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx);
                emit(rv_add(hi(rd), hi(rd), RV_REG_T1), ctx);
                break;
        case BPF_LSH:
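                /*
                 * Variable 64-bit left shift: the leading blt picks a path
                 * based on the shift amount. For shifts of 32 or more,
                 * hi(rd) = lo(rd) << (shift - 32) and lo(rd) = 0; otherwise
                 * the bits shifted out of lo(rd) are OR-ed into hi(rd).
                 * BPF_RSH and BPF_ARSH below mirror this structure.
                 */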
                emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
                emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
                emit(rv_sll(hi(rd), lo(rd), RV_REG_T0), ctx);
                emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx);
                emit(rv_jal(RV_REG_ZERO, 16), ctx);
                emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
                emit(rv_srli(RV_REG_T0, lo(rd), 1), ctx);
                emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
                emit(rv_srl(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
                emit(rv_sll(hi(rd), hi(rd), lo(rs)), ctx);
                emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
                emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_RSH:
                emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
                emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
                emit(rv_srl(lo(rd), hi(rd), RV_REG_T0), ctx);
                emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                emit(rv_jal(RV_REG_ZERO, 16), ctx);
                emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
                emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx);
                emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
                emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
                emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
                emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
                emit(rv_srl(hi(rd), hi(rd), lo(rs)), ctx);
                break;
        case BPF_ARSH:
                emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
                emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
                emit(rv_sra(lo(rd), hi(rd), RV_REG_T0), ctx);
                emit(rv_srai(hi(rd), hi(rd), 31), ctx);
                emit(rv_jal(RV_REG_ZERO, 16), ctx);
                emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
                emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx);
                emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
                emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
                emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
                emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
                emit(rv_sra(hi(rd), hi(rd), lo(rs)), ctx);
                break;
        case BPF_NEG:
                emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx);
                emit(rv_sltu(RV_REG_T0, RV_REG_ZERO, lo(rd)), ctx);
                emit(rv_sub(hi(rd), RV_REG_ZERO, hi(rd)), ctx);
                emit(rv_sub(hi(rd), hi(rd), RV_REG_T0), ctx);
                break;
        }

        bpf_put_reg64(dst, rd, ctx);
}

static void emit_alu_r32(const s8 *dst, const s8 *src,
                         struct rv_jit_context *ctx, const u8 op)
{
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *tmp2 = bpf2rv32[TMP_REG_2];
        const s8 *rd = bpf_get_reg32(dst, tmp1, ctx);
        const s8 *rs = bpf_get_reg32(src, tmp2, ctx);

        switch (op) {
        case BPF_MOV:
                emit(rv_addi(lo(rd), lo(rs), 0), ctx);
                break;
        case BPF_ADD:
                emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_SUB:
                emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_AND:
                emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_OR:
                emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_XOR:
                emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_MUL:
                emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_DIV:
                emit(rv_divu(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_MOD:
                emit(rv_remu(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_LSH:
                emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_RSH:
                emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_ARSH:
                emit(rv_sra(lo(rd), lo(rd), lo(rs)), ctx);
                break;
        case BPF_NEG:
                emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx);
                break;
        }

        bpf_put_reg32(dst, rd, ctx);
}

static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 rvoff,
                           struct rv_jit_context *ctx, const u8 op)
{
        int e, s = ctx->ninsns;
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *tmp2 = bpf2rv32[TMP_REG_2];

        const s8 *rs1 = bpf_get_reg64(src1, tmp1, ctx);
        const s8 *rs2 = bpf_get_reg64(src2, tmp2, ctx);

        /*
         * NO_JUMP skips over the rest of the instructions and the
         * emit_jump_and_link, meaning the BPF branch is not taken.
         * JUMP skips directly to the emit_jump_and_link, meaning
         * the BPF branch is taken.
         *
         * The fallthrough case results in the BPF branch being taken.
         */
#define NO_JUMP(idx) (6 + (2 * (idx)))
#define JUMP(idx) (2 + (2 * (idx)))
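        /*
         * The branch offsets are in 16-bit units (see ninsns_rvoff): the
         * macro argument is the number of compare instructions remaining
         * after the current one, and the constants account for the
         * two-instruction jump emitted by emit_jump_and_link() below.
         */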
        switch (op) {
        case BPF_JEQ:
                emit(rv_bne(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bne(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JGT:
                emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
                emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JLT:
                emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
                emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JGE:
                emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
                emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JLE:
                emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
                emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JNE:
                emit(rv_bne(hi(rs1), hi(rs2), JUMP(1)), ctx);
                emit(rv_beq(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JSGT:
                emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
                emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JSLT:
                emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
                emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JSGE:
                emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
                emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JSLE:
                emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
                emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
                emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
                break;
        case BPF_JSET:
                emit(rv_and(RV_REG_T0, hi(rs1), hi(rs2)), ctx);
                emit(rv_bne(RV_REG_T0, RV_REG_ZERO, JUMP(2)), ctx);
                emit(rv_and(RV_REG_T0, lo(rs1), lo(rs2)), ctx);
                emit(rv_beq(RV_REG_T0, RV_REG_ZERO, NO_JUMP(0)), ctx);
                break;
        }
#undef NO_JUMP
#undef JUMP

        e = ctx->ninsns;
        /* Adjust for extra insns. */
        rvoff -= ninsns_rvoff(e - s);
        emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
        return 0;
}

static int emit_bcc(u8 op, u8 rd, u8 rs, int rvoff, struct rv_jit_context *ctx)
{
        int e, s = ctx->ninsns;
        bool far = false;
        int off;

        if (op == BPF_JSET) {
                /*
                 * BPF_JSET is a special case: it has no inverse so we always
                 * treat it as a far branch.
                 */
                far = true;
        } else if (!is_13b_int(rvoff)) {
                op = invert_bpf_cond(op);
                far = true;
        }

        /*
         * For a far branch, the condition is negated and we jump over the
         * branch itself, and the two instructions from emit_jump_and_link.
         * For a near branch, just use rvoff.
         */
        off = far ? 6 : (rvoff >> 1);

        switch (op) {
        case BPF_JEQ:
                emit(rv_beq(rd, rs, off), ctx);
                break;
        case BPF_JGT:
                emit(rv_bgtu(rd, rs, off), ctx);
                break;
        case BPF_JLT:
                emit(rv_bltu(rd, rs, off), ctx);
                break;
        case BPF_JGE:
                emit(rv_bgeu(rd, rs, off), ctx);
                break;
        case BPF_JLE:
                emit(rv_bleu(rd, rs, off), ctx);
                break;
        case BPF_JNE:
                emit(rv_bne(rd, rs, off), ctx);
                break;
        case BPF_JSGT:
                emit(rv_bgt(rd, rs, off), ctx);
                break;
        case BPF_JSLT:
                emit(rv_blt(rd, rs, off), ctx);
                break;
        case BPF_JSGE:
                emit(rv_bge(rd, rs, off), ctx);
                break;
        case BPF_JSLE:
                emit(rv_ble(rd, rs, off), ctx);
                break;
        case BPF_JSET:
                emit(rv_and(RV_REG_T0, rd, rs), ctx);
                emit(rv_beq(RV_REG_T0, RV_REG_ZERO, off), ctx);
                break;
        }

        if (far) {
                e = ctx->ninsns;
                /* Adjust for extra insns. */
                rvoff -= ninsns_rvoff(e - s);
                emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
        }
        return 0;
}

static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 rvoff,
                           struct rv_jit_context *ctx, const u8 op)
{
        int e, s = ctx->ninsns;
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *tmp2 = bpf2rv32[TMP_REG_2];

        const s8 *rs1 = bpf_get_reg32(src1, tmp1, ctx);
        const s8 *rs2 = bpf_get_reg32(src2, tmp2, ctx);

        e = ctx->ninsns;
        /* Adjust for extra insns. */
        rvoff -= ninsns_rvoff(e - s);

        if (emit_bcc(op, lo(rs1), lo(rs2), rvoff, ctx))
                return -1;

        return 0;
}

static void emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
{
        const s8 *r0 = bpf2rv32[BPF_REG_0];
        const s8 *r5 = bpf2rv32[BPF_REG_5];
        u32 upper = ((u32)addr + (1 << 11)) >> 12;
        u32 lower = addr & 0xfff;

        /* R1-R4 already in correct registers---need to push R5 to stack. */
        emit(rv_addi(RV_REG_SP, RV_REG_SP, -16), ctx);
        emit(rv_sw(RV_REG_SP, 0, lo(r5)), ctx);
        emit(rv_sw(RV_REG_SP, 4, hi(r5)), ctx);

        /* Backup TCC. */
        emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);

        /*
         * Use lui/jalr pair to jump to absolute address. Don't use emit_imm as
         * the number of emitted instructions should not depend on the value of
         * addr.
         */
        emit(rv_lui(RV_REG_T1, upper), ctx);
        emit(rv_jalr(RV_REG_RA, RV_REG_T1, lower), ctx);

        /* Restore TCC. */
        emit(rv_addi(RV_REG_TCC, RV_REG_TCC_SAVED, 0), ctx);

        /* Set return value and restore stack. */
        emit(rv_addi(lo(r0), RV_REG_A0, 0), ctx);
        emit(rv_addi(hi(r0), RV_REG_A1, 0), ctx);
        emit(rv_addi(RV_REG_SP, RV_REG_SP, 16), ctx);
}

static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
        /*
         * R1 -> &ctx
         * R2 -> &array
         * R3 -> index
         */
        int tc_ninsn, off, start_insn = ctx->ninsns;
        const s8 *arr_reg = bpf2rv32[BPF_REG_2];
        const s8 *idx_reg = bpf2rv32[BPF_REG_3];

        tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
                ctx->offset[0];

        /* max_entries = array->map.max_entries; */
        off = offsetof(struct bpf_array, map.max_entries);
        if (is_12b_check(off, insn))
                return -1;
        emit(rv_lw(RV_REG_T1, off, lo(arr_reg)), ctx);

        /*
         * if (index >= max_entries)
         *   goto out;
         */
        off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx);

        /*
         * if (--tcc < 0)
         *   goto out;
         */
        emit(rv_addi(RV_REG_TCC, RV_REG_TCC, -1), ctx);
        off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);

        /*
         * prog = array->ptrs[index];
         * if (!prog)
         *   goto out;
         */
        emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx);
        emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx);
        off = offsetof(struct bpf_array, ptrs);
        if (is_12b_check(off, insn))
                return -1;
        emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
        off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_bcc(BPF_JEQ, RV_REG_T0, RV_REG_ZERO, off, ctx);

        /*
         * tcc = temp_tcc;
         * goto *(prog->bpf_func + 4);
         */
        off = offsetof(struct bpf_prog, bpf_func);
        if (is_12b_check(off, insn))
                return -1;
        emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
        /* Epilogue jumps to *(t0 + 4). */
        __build_epilogue(true, ctx);
        return 0;
}

static int emit_load_r64(const s8 *dst, const s8 *src, s16 off,
                         struct rv_jit_context *ctx, const u8 size)
{
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *tmp2 = bpf2rv32[TMP_REG_2];
        const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
        const s8 *rs = bpf_get_reg64(src, tmp2, ctx);

        emit_imm(RV_REG_T0, off, ctx);
        emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rs)), ctx);

        switch (size) {
        case BPF_B:
                emit(rv_lbu(lo(rd), 0, RV_REG_T0), ctx);
                if (!ctx->prog->aux->verifier_zext)
                        emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                break;
        case BPF_H:
                emit(rv_lhu(lo(rd), 0, RV_REG_T0), ctx);
                if (!ctx->prog->aux->verifier_zext)
                        emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                break;
        case BPF_W:
                emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx);
                if (!ctx->prog->aux->verifier_zext)
                        emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                break;
        case BPF_DW:
                emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx);
                emit(rv_lw(hi(rd), 4, RV_REG_T0), ctx);
                break;
        }

        bpf_put_reg64(dst, rd, ctx);
        return 0;
}

static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
                          struct rv_jit_context *ctx, const u8 size,
                          const u8 mode)
{
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *tmp2 = bpf2rv32[TMP_REG_2];
        const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
        const s8 *rs = bpf_get_reg64(src, tmp2, ctx);

        if (mode == BPF_ATOMIC && size != BPF_W)
                return -1;

        emit_imm(RV_REG_T0, off, ctx);
        emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rd)), ctx);

        switch (size) {
        case BPF_B:
                emit(rv_sb(RV_REG_T0, 0, lo(rs)), ctx);
                break;
        case BPF_H:
                emit(rv_sh(RV_REG_T0, 0, lo(rs)), ctx);
                break;
        case BPF_W:
                switch (mode) {
                case BPF_MEM:
                        emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
                        break;
                case BPF_ATOMIC: /* Only BPF_ADD supported */
                        emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
                             ctx);
                        break;
                }
                break;
        case BPF_DW:
                emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
                emit(rv_sw(RV_REG_T0, 4, hi(rs)), ctx);
                break;
        }

        return 0;
}
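
/* Byte-swap the low 16 bits of rd and clear its upper 16 bits. */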
static void emit_rev16(const s8 rd, struct rv_jit_context *ctx)
{
        emit(rv_slli(rd, rd, 16), ctx);
        emit(rv_slli(RV_REG_T1, rd, 8), ctx);
        emit(rv_srli(rd, rd, 8), ctx);
        emit(rv_add(RV_REG_T1, rd, RV_REG_T1), ctx);
        emit(rv_srli(rd, RV_REG_T1, 16), ctx);
}
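
/*
 * Byte-swap rd: accumulate its bytes into t1 from least to most
 * significant, shifting t1 left by 8 between bytes.
 */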
static void emit_rev32(const s8 rd, struct rv_jit_context *ctx)
{
        emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 0), ctx);
        emit(rv_andi(RV_REG_T0, rd, 255), ctx);
        emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
        emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
        emit(rv_srli(rd, rd, 8), ctx);
        emit(rv_andi(RV_REG_T0, rd, 255), ctx);
        emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
        emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
        emit(rv_srli(rd, rd, 8), ctx);
        emit(rv_andi(RV_REG_T0, rd, 255), ctx);
        emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
        emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
        emit(rv_srli(rd, rd, 8), ctx);
        emit(rv_andi(RV_REG_T0, rd, 255), ctx);
        emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
        emit(rv_addi(rd, RV_REG_T1, 0), ctx);
}
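
/* Zero-extend a BPF register to 64 bits by clearing its high word. */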
static void emit_zext64(const s8 *dst, struct rv_jit_context *ctx)
{
        const s8 *rd;
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];

        rd = bpf_get_reg64(dst, tmp1, ctx);
        emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
        bpf_put_reg64(dst, rd, ctx);
}

int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                      bool extra_pass)
{
        bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
                BPF_CLASS(insn->code) == BPF_JMP;
        int s, e, rvoff, i = insn - ctx->prog->insnsi;
        u8 code = insn->code;
        s16 off = insn->off;
        s32 imm = insn->imm;

        const s8 *dst = bpf2rv32[insn->dst_reg];
        const s8 *src = bpf2rv32[insn->src_reg];
        const s8 *tmp1 = bpf2rv32[TMP_REG_1];
        const s8 *tmp2 = bpf2rv32[TMP_REG_2];

        switch (code) {
        case BPF_ALU64 | BPF_MOV | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_X:
        case BPF_ALU64 | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_X:
        case BPF_ALU64 | BPF_OR | BPF_X:
        case BPF_ALU64 | BPF_XOR | BPF_X:
        case BPF_ALU64 | BPF_MUL | BPF_X:
        case BPF_ALU64 | BPF_MUL | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_X:
        case BPF_ALU64 | BPF_RSH | BPF_X:
        case BPF_ALU64 | BPF_ARSH | BPF_X:
                if (BPF_SRC(code) == BPF_K) {
                        emit_imm32(tmp2, imm, ctx);
                        src = tmp2;
                }
                emit_alu_r64(dst, src, ctx, BPF_OP(code));
                break;

        case BPF_ALU64 | BPF_NEG:
                emit_alu_r64(dst, tmp2, ctx, BPF_OP(code));
                break;

        case BPF_ALU64 | BPF_DIV | BPF_X:
        case BPF_ALU64 | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_X:
        case BPF_ALU64 | BPF_MOD | BPF_K:
                goto notsupported;

        case BPF_ALU64 | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_OR | BPF_K:
        case BPF_ALU64 | BPF_XOR | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_K:
        case BPF_ALU64 | BPF_RSH | BPF_K:
        case BPF_ALU64 | BPF_ARSH | BPF_K:
                emit_alu_i64(dst, imm, ctx, BPF_OP(code));
                break;

        case BPF_ALU | BPF_MOV | BPF_X:
                if (imm == 1) {
                        /* Special mov32 for zext. */
                        emit_zext64(dst, ctx);
                        break;
                }
                fallthrough;

        case BPF_ALU | BPF_ADD | BPF_X:
        case BPF_ALU | BPF_SUB | BPF_X:
        case BPF_ALU | BPF_AND | BPF_X:
        case BPF_ALU | BPF_OR | BPF_X:
        case BPF_ALU | BPF_XOR | BPF_X:
        case BPF_ALU | BPF_MUL | BPF_X:
        case BPF_ALU | BPF_MUL | BPF_K:
        case BPF_ALU | BPF_DIV | BPF_X:
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU | BPF_MOD | BPF_X:
        case BPF_ALU | BPF_MOD | BPF_K:
        case BPF_ALU | BPF_LSH | BPF_X:
        case BPF_ALU | BPF_RSH | BPF_X:
        case BPF_ALU | BPF_ARSH | BPF_X:
                if (BPF_SRC(code) == BPF_K) {
                        emit_imm32(tmp2, imm, ctx);
                        src = tmp2;
                }
                emit_alu_r32(dst, src, ctx, BPF_OP(code));
                break;

        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU | BPF_OR | BPF_K:
        case BPF_ALU | BPF_XOR | BPF_K:
        case BPF_ALU | BPF_LSH | BPF_K:
        case BPF_ALU | BPF_RSH | BPF_K:
        case BPF_ALU | BPF_ARSH | BPF_K:
                /*
                 * mul,div,mod are handled in the BPF_X case since there are
                 * no RISC-V I-type equivalents.
                 */
                emit_alu_i32(dst, imm, ctx, BPF_OP(code));
                break;

        case BPF_ALU | BPF_NEG:
                /*
                 * src is ignored---choose tmp2 as a dummy register since it
                 * is not on the stack.
                 */
                emit_alu_r32(dst, tmp2, ctx, BPF_OP(code));
                break;

        case BPF_ALU | BPF_END | BPF_FROM_LE:
        {
                const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);

                switch (imm) {
                case 16:
                        emit(rv_slli(lo(rd), lo(rd), 16), ctx);
                        emit(rv_srli(lo(rd), lo(rd), 16), ctx);
                        fallthrough;
                case 32:
                        if (!ctx->prog->aux->verifier_zext)
                                emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                        break;
                case 64:
                        /* Do nothing. */
                        break;
                default:
                        pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
                        return -1;
                }

                bpf_put_reg64(dst, rd, ctx);
                break;
        }

        case BPF_ALU | BPF_END | BPF_FROM_BE:
        {
                const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);

                switch (imm) {
                case 16:
                        emit_rev16(lo(rd), ctx);
                        if (!ctx->prog->aux->verifier_zext)
                                emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                        break;
                case 32:
                        emit_rev32(lo(rd), ctx);
                        if (!ctx->prog->aux->verifier_zext)
                                emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
                        break;
                case 64:
                        /* Swap upper and lower halves. */
                        emit(rv_addi(RV_REG_T0, lo(rd), 0), ctx);
                        emit(rv_addi(lo(rd), hi(rd), 0), ctx);
                        emit(rv_addi(hi(rd), RV_REG_T0, 0), ctx);

                        /* Swap each half. */
                        emit_rev32(lo(rd), ctx);
                        emit_rev32(hi(rd), ctx);
                        break;
                default:
                        pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
                        return -1;
                }

                bpf_put_reg64(dst, rd, ctx);
                break;
        }

        case BPF_JMP | BPF_JA:
                rvoff = rv_offset(i, off, ctx);
                emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
                break;

        case BPF_JMP | BPF_CALL:
        {
                bool fixed;
                int ret;
                u64 addr;

                ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
                                            &fixed);
                if (ret < 0)
                        return ret;
                emit_call(fixed, addr, ctx);
                break;
        }

        case BPF_JMP | BPF_TAIL_CALL:
                if (emit_bpf_tail_call(i, ctx))
                        return -1;
                break;

        case BPF_JMP | BPF_JEQ | BPF_X:
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP32 | BPF_JEQ | BPF_X:
        case BPF_JMP32 | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_X:
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP32 | BPF_JNE | BPF_X:
        case BPF_JMP32 | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JLE | BPF_X:
        case BPF_JMP | BPF_JLE | BPF_K:
        case BPF_JMP32 | BPF_JLE | BPF_X:
        case BPF_JMP32 | BPF_JLE | BPF_K:
        case BPF_JMP | BPF_JLT | BPF_X:
        case BPF_JMP | BPF_JLT | BPF_K:
        case BPF_JMP32 | BPF_JLT | BPF_X:
        case BPF_JMP32 | BPF_JLT | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_X:
        case BPF_JMP | BPF_JGE | BPF_K:
        case BPF_JMP32 | BPF_JGE | BPF_X:
        case BPF_JMP32 | BPF_JGE | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_X:
        case BPF_JMP | BPF_JGT | BPF_K:
        case BPF_JMP32 | BPF_JGT | BPF_X:
        case BPF_JMP32 | BPF_JGT | BPF_K:
        case BPF_JMP | BPF_JSLE | BPF_X:
        case BPF_JMP | BPF_JSLE | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_X:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
        case BPF_JMP | BPF_JSLT | BPF_X:
        case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP32 | BPF_JSLT | BPF_X:
        case BPF_JMP32 | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSGE | BPF_X:
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP32 | BPF_JSGT | BPF_X:
        case BPF_JMP32 | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_X:
        case BPF_JMP | BPF_JSET | BPF_K:
        case BPF_JMP32 | BPF_JSET | BPF_X:
        case BPF_JMP32 | BPF_JSET | BPF_K:
                rvoff = rv_offset(i, off, ctx);
                if (BPF_SRC(code) == BPF_K) {
                        s = ctx->ninsns;
                        emit_imm32(tmp2, imm, ctx);
                        src = tmp2;
                        e = ctx->ninsns;
                        rvoff -= ninsns_rvoff(e - s);
                }

                if (is64)
                        emit_branch_r64(dst, src, rvoff, ctx, BPF_OP(code));
                else
                        emit_branch_r32(dst, src, rvoff, ctx, BPF_OP(code));
                break;

        case BPF_JMP | BPF_EXIT:
                if (i == ctx->prog->len - 1)
                        break;

                rvoff = epilogue_offset(ctx);
                emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
                break;

        case BPF_LD | BPF_IMM | BPF_DW:
        {
                struct bpf_insn insn1 = insn[1];
                s32 imm_lo = imm;
                s32 imm_hi = insn1.imm;
                const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);

                emit_imm64(rd, imm_hi, imm_lo, ctx);
                bpf_put_reg64(dst, rd, ctx);
                return 1;
        }

        case BPF_LDX | BPF_MEM | BPF_B:
        case BPF_LDX | BPF_MEM | BPF_H:
        case BPF_LDX | BPF_MEM | BPF_W:
        case BPF_LDX | BPF_MEM | BPF_DW:
                if (emit_load_r64(dst, src, off, ctx, BPF_SIZE(code)))
                        return -1;
                break;

        /* speculation barrier */
        case BPF_ST | BPF_NOSPEC:
                break;

        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_DW:
        case BPF_STX | BPF_MEM | BPF_B:
        case BPF_STX | BPF_MEM | BPF_H:
        case BPF_STX | BPF_MEM | BPF_W:
        case BPF_STX | BPF_MEM | BPF_DW:
                if (BPF_CLASS(code) == BPF_ST) {
                        emit_imm32(tmp2, imm, ctx);
                        src = tmp2;
                }

                if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
                                   BPF_MODE(code)))
                        return -1;
                break;

        case BPF_STX | BPF_ATOMIC | BPF_W:
                if (insn->imm != BPF_ADD) {
                        pr_info_once(
                                "bpf-jit: not supported: atomic operation %02x ***\n",
                                insn->imm);
                        return -EFAULT;
                }

                if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
                                   BPF_MODE(code)))
                        return -1;
                break;

        /* No hardware support for 8-byte atomics in RV32. */
        case BPF_STX | BPF_ATOMIC | BPF_DW:
                /* Fallthrough. */

notsupported:
                pr_info_once("bpf-jit: not supported: opcode %02x ***\n", code);
                return -EFAULT;

        default:
                pr_err("bpf-jit: unknown opcode %02x\n", code);
                return -EINVAL;
        }

        return 0;
}

void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
        const s8 *fp = bpf2rv32[BPF_REG_FP];
        const s8 *r1 = bpf2rv32[BPF_REG_1];
        int stack_adjust = 0;
        int bpf_stack_adjust =
                round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);

        /* Make space for callee-saved registers. */
        stack_adjust += NR_SAVED_REGISTERS * sizeof(u32);
        /* Make space for BPF registers on stack. */
        stack_adjust += BPF_JIT_SCRATCH_REGS * sizeof(u32);
        /* Make space for BPF stack. */
        stack_adjust += bpf_stack_adjust;
        /* Round up for stack alignment. */
        stack_adjust = round_up(stack_adjust, STACK_ALIGN);

        /*
         * The first instruction sets the tail-call-counter (TCC) register.
         * This instruction is skipped by tail calls.
         */
        emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);

        emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);

        /* Save callee-save registers. */
        emit(rv_sw(RV_REG_SP, stack_adjust - 4, RV_REG_RA), ctx);
        emit(rv_sw(RV_REG_SP, stack_adjust - 8, RV_REG_FP), ctx);
        emit(rv_sw(RV_REG_SP, stack_adjust - 12, RV_REG_S1), ctx);
        emit(rv_sw(RV_REG_SP, stack_adjust - 16, RV_REG_S2), ctx);
        emit(rv_sw(RV_REG_SP, stack_adjust - 20, RV_REG_S3), ctx);
        emit(rv_sw(RV_REG_SP, stack_adjust - 24, RV_REG_S4), ctx);
        emit(rv_sw(RV_REG_SP, stack_adjust - 28, RV_REG_S5), ctx);
        emit(rv_sw(RV_REG_SP, stack_adjust - 32, RV_REG_S6), ctx);
        emit(rv_sw(RV_REG_SP, stack_adjust - 36, RV_REG_S7), ctx);

        /* Set fp: used as the base address for stacked BPF registers. */
        emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);

        /* Set up BPF frame pointer. */
        emit(rv_addi(lo(fp), RV_REG_SP, bpf_stack_adjust), ctx);
        emit(rv_addi(hi(fp), RV_REG_ZERO, 0), ctx);

        /* Set up BPF context pointer. */
        emit(rv_addi(lo(r1), RV_REG_A0, 0), ctx);
        emit(rv_addi(hi(r1), RV_REG_ZERO, 0), ctx);

        ctx->stack_size = stack_adjust;
}

void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
        __build_epilogue(false, ctx);
}