// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <[email protected]>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers BPF_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE	(5*8)

/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	24

/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
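
/*
 * Worked example, assuming the ELFv2 minimum frame of 32 bytes (the
 * "frame header" in the layout above): BPF_PPC_STACKFRAME works out to
 * 32 + 24 + 40 = 96 bytes, and the program's own stack usage
 * (ctx->stack_size, up to 512 bytes) is added on top of this in the
 * prologue.
 */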

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R8;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R3;
	ctx->b2p[BPF_REG_2] = _R4;
	ctx->b2p[BPF_REG_3] = _R5;
	ctx->b2p[BPF_REG_4] = _R6;
	ctx->b2p[BPF_REG_5] = _R7;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R27;
	ctx->b2p[BPF_REG_7] = _R28;
	ctx->b2p[BPF_REG_8] = _R29;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R31;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R12;
	ctx->b2p[TMP_REG_1] = _R9;
	ctx->b2p[TMP_REG_2] = _R10;
}

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN	_R27

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
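
/*
 * Cross-check with the constants above: with no stack frame,
 * bpf_jit_stack_local() resolves to -(40 + 24) = -64 bytes below r1,
 * which puts the tail call count at -48 -- the same
 * -(BPF_PPC_STACK_SAVE + 8) red-zone slot that the prologue initializes.
 */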

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());
}

static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
	long reladdr;

	if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
		return -EINVAL;

	reladdr = func_addr - kernel_toc_addr();
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
		return -ERANGE;
	}

	EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
	EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}

int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	if (WARN_ON_ONCE(func && is_module_text_address(func)))
		return -EINVAL;

	/* skip past descriptor if elf v1 */
	func += FUNCTION_DESCR_SIZE;

	/* Load function address into r12 */
	PPC_LI64(_R12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());

	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}
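
/*
 * For reference: the worst-case PPC_LI64() expansion of an arbitrary
 * 64-bit constant is five instructions (typically lis, ori, sldi, oris,
 * ori), which is why the loop above pads to exactly five so the sequence
 * can be patched in place on the final pass.
 */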

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	int bpf_tailcall_prologue_size = 8;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			  FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
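
/*
 * Note on the branch target above: jumping bpf_tailcall_prologue_size
 * bytes into the callee skips its TOC load (r2 is already valid here)
 * and its tail_call_cnt initialization, so the count stored in the
 * stack/red-zone slot persists across tail calls.
 */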

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 size = BPF_SIZE(code);
		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(tmp1_reg, imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;

			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(tmp1_reg, imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
			/* Truncate to 32-bits */
			if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
			break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(tmp2_reg, 4));
					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
				}
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
			    !security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				ctx->seen |= SEEN_FUNC;
				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(_R12));
				EMIT(PPC_RAW_BCTRL());
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp2_reg, off));
				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
			} else {
				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
			}
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			save_reg = tmp2_reg;
			ret_reg = src_reg;

			/* Get offset into TMP_REG_1 */
			EMIT(PPC_RAW_LI(tmp1_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			if (size == BPF_DW)
				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
			else
				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));

			/* Save old value in _R0 */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(_R0, tmp2_reg));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_R0 */
				if (size == BPF_DW)
					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				else
					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			if (size == BPF_DW)
				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
			else
				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			if (imm & BPF_FETCH) {
				EMIT(PPC_RAW_MR(ret_reg, _R0));
				/*
				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
				 * For context, see commit 39491867ace5.
				 */
				if (size != BPF_DW && imm == BPF_CMPXCHG &&
				    insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
			}
			break;
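
		/*
		 * Note: the larx/stcx. pair above forms the usual PowerPC
		 * load-reserve/store-conditional retry loop -- on a failed
		 * store-conditional, PPC_BCC_SHORT(COND_NE, tmp_idx) branches
		 * back to reload and redo the operation until it succeeds.
		 */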

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(tmp2_reg, PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned for BPF_DW, because
				 * we might generate two instructions.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				if (off % 4) {
					EMIT(PPC_RAW_LI(tmp1_reg, off));
					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
				}
				break;
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
							    4, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			tmp_idx = ctx->idx;
			PPC_LI64(dst_reg, imm64);
			/* padding to allow full 5 instructions for later patching */
			for (j = ctx->idx - tmp_idx; j < 5; j++)
				EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			if (ret)
				return ret;

			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
				} else {
					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(tmp1_reg, imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(tmp1_reg, imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
				else {
					PPC_LI32(tmp1_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
								     tmp1_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					   code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}