bpf_jit_comp.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Just-In-Time compiler for eBPF bytecode on 32-bit and 64-bit MIPS.
 *
 * Copyright (c) 2021 Anyfi Networks AB.
 * Author: Johan Almbladh <[email protected]>
 *
 * Based on code and ideas from
 * Copyright (c) 2017 Cavium, Inc.
 * Copyright (c) 2017 Shubham Bansal <[email protected]>
 * Copyright (c) 2011 Mircea Gherzan <[email protected]>
 */

#ifndef _BPF_JIT_COMP_H
#define _BPF_JIT_COMP_H

/* MIPS registers */
#define MIPS_R_ZERO     0   /* Const zero */
#define MIPS_R_AT       1   /* Asm temp   */
#define MIPS_R_V0       2   /* Result     */
#define MIPS_R_V1       3   /* Result     */
#define MIPS_R_A0       4   /* Argument   */
#define MIPS_R_A1       5   /* Argument   */
#define MIPS_R_A2       6   /* Argument   */
#define MIPS_R_A3       7   /* Argument   */
#define MIPS_R_A4       8   /* Arg (n64)  */
#define MIPS_R_A5       9   /* Arg (n64)  */
#define MIPS_R_A6       10  /* Arg (n64)  */
#define MIPS_R_A7       11  /* Arg (n64)  */
#define MIPS_R_T0       8   /* Temp (o32) */
#define MIPS_R_T1       9   /* Temp (o32) */
#define MIPS_R_T2       10  /* Temp (o32) */
#define MIPS_R_T3       11  /* Temp (o32) */
#define MIPS_R_T4       12  /* Temporary  */
#define MIPS_R_T5       13  /* Temporary  */
#define MIPS_R_T6       14  /* Temporary  */
#define MIPS_R_T7       15  /* Temporary  */
#define MIPS_R_S0       16  /* Saved      */
#define MIPS_R_S1       17  /* Saved      */
#define MIPS_R_S2       18  /* Saved      */
#define MIPS_R_S3       19  /* Saved      */
#define MIPS_R_S4       20  /* Saved      */
#define MIPS_R_S5       21  /* Saved      */
#define MIPS_R_S6       22  /* Saved      */
#define MIPS_R_S7       23  /* Saved      */
#define MIPS_R_T8       24  /* Temporary  */
#define MIPS_R_T9       25  /* Temporary  */
/*      MIPS_R_K0       26     Reserved   */
/*      MIPS_R_K1       27     Reserved   */
#define MIPS_R_GP       28  /* Global ptr */
#define MIPS_R_SP       29  /* Stack ptr  */
#define MIPS_R_FP       30  /* Frame ptr  */
#define MIPS_R_RA       31  /* Return     */

/*
 * Jump address mask for immediate jumps. The four most significant bits
 * must be equal to PC.
 */
#define MIPS_JMP_MASK   0x0fffffffUL
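
/*
 * Illustrative sketch, not part of the interface: a MIPS j/jal instruction
 * keeps the upper four address bits of the PC and takes the lower 28 bits
 * from the instruction, so an immediate jump can only land within the
 * current 256 MB segment. Roughly (pc and addr are hypothetical names):
 *
 *      u32 target = (pc & ~MIPS_JMP_MASK) | (addr & MIPS_JMP_MASK);
 *
 * which is why the four most significant bits of the target must match
 * those of the PC.
 */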

/* Maximum number of iterations in offset table computation */
#define JIT_MAX_ITERATIONS      8

/*
 * Jump pseudo-instructions used internally
 * for branch conversion and branch optimization.
 */
#define JIT_JNSET       0xe0
#define JIT_JNOP        0xf0

/* Descriptor flag for PC-relative branch conversion */
#define JIT_DESC_CONVERT        BIT(31)

/* JIT context for an eBPF program */
struct jit_context {
        struct bpf_prog *program;       /* The eBPF program being JITed        */
        u32 *descriptors;               /* eBPF to JITed CPU insn descriptors  */
        u32 *target;                    /* JITed code buffer                   */
        u32 bpf_index;                  /* Index of current BPF program insn   */
        u32 jit_index;                  /* Index of current JIT target insn    */
        u32 changes;                    /* Number of PC-relative branch conv   */
        u32 accessed;                   /* Bit mask of read eBPF registers     */
        u32 clobbered;                  /* Bit mask of modified CPU registers  */
        u32 stack_size;                 /* Total allocated stack size in bytes */
        u32 saved_size;                 /* Size of callee-saved registers      */
        u32 stack_used;                 /* Stack size used for function calls  */
};

/* Emit the instruction if the JIT memory space has been allocated */
#define __emit(ctx, func, ...)                                  \
do {                                                            \
        if ((ctx)->target != NULL) {                            \
                u32 *p = &(ctx)->target[ctx->jit_index];        \
                uasm_i_##func(&p, ##__VA_ARGS__);               \
        }                                                       \
        (ctx)->jit_index++;                                     \
} while (0)
#define emit(...) __emit(__VA_ARGS__)
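
/*
 * Usage sketch, illustrative only: the variadic arguments are forwarded to
 * the matching uasm helper, so a call such as (assuming the uasm addu
 * helper and these example registers)
 *
 *      emit(ctx, addu, MIPS_R_T4, MIPS_R_A0, MIPS_R_A1);
 *
 * would expand to uasm_i_addu(&p, ...) once ctx->target is allocated,
 * while still advancing ctx->jit_index during the sizing pass where
 * ctx->target is NULL.
 */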

/* Workaround for R10000 ll/sc errata */
#ifdef CONFIG_WAR_R10000_LLSC
#define LLSC_beqz       beqzl
#else
#define LLSC_beqz       beqz
#endif

/* Workaround for Loongson-3 ll/sc errata */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
#define LLSC_sync(ctx)  emit(ctx, sync, 0)
#define LLSC_offset     4
#else
#define LLSC_sync(ctx)
#define LLSC_offset     0
#endif
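
/*
 * Sketch of how these macros would typically appear in an ll/sc retry loop,
 * illustrative only (tmp, off and dst are placeholder names, and the exact
 * branch offset depends on the emitted sequence). LLSC_offset accounts for
 * the extra sync instruction that the Loongson-3 workaround inserts:
 *
 *      LLSC_sync(ctx);
 *      emit(ctx, ll, tmp, off, dst);
 *      ... modify tmp ...
 *      emit(ctx, sc, tmp, off, dst);
 *      emit(ctx, LLSC_beqz, tmp, -16 - LLSC_offset);
 */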

/* Workaround for Loongson-2F jump errata */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
#define JALR_MASK       0xffffffffcfffffffULL
#else
#define JALR_MASK       (~0ULL)
#endif

/*
 * Mark a BPF register as accessed; it needs to be
 * initialized by the program if expected, e.g. FP.
 */
static inline void access_reg(struct jit_context *ctx, u8 reg)
{
        ctx->accessed |= BIT(reg);
}

/*
 * Mark a CPU register as clobbered; it needs to be
 * saved/restored by the program if callee-saved.
 */
static inline void clobber_reg(struct jit_context *ctx, u8 reg)
{
        ctx->clobbered |= BIT(reg);
}
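
/*
 * Illustrative use, not a declaration from this file: instruction emitters
 * would record the registers they touch so that the prologue and epilogue
 * can save and restore only what is actually needed, e.g.
 *
 *      access_reg(ctx, BPF_REG_FP);
 *      clobber_reg(ctx, MIPS_R_S0);
 *
 * before build_prologue() consults ctx->accessed and ctx->clobbered.
 */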

/*
 * Push registers on the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be written is returned.
 */
int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth);

/*
 * Pop registers from the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be read is returned.
 */
int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth);
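
/*
 * Example call, illustrative only: a prologue might save all clobbered
 * callee-saved registers with no exclusions, starting at depth 0, and
 * remember where the next free slot is:
 *
 *      int depth = push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, 0);
 *
 * where JIT_CALLEE_REGS is a hypothetical mask of callee-saved CPU
 * registers; the matching epilogue would call pop_regs() with the same
 * mask and starting depth.
 */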

/* Compute the 28-bit jump target address from a BPF program location */
int get_target(struct jit_context *ctx, u32 loc);

/* Compute the PC-relative JIT offset for a relative BPF program offset */
int get_offset(const struct jit_context *ctx, int off);

/* dst = imm (32-bit) */
void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm);

/* dst = src (32-bit) */
void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src);

/* Validate ALU/ALU64 immediate range */
bool valid_alu_i(u8 op, s32 imm);

/* Rewrite ALU/ALU64 immediate operation */
bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val);

/* ALU immediate operation (32-bit) */
void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op);

/* ALU register operation (32-bit) */
void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op);

/* Atomic read-modify-write (32-bit) */
void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code);

/* Atomic compare-and-exchange (32-bit) */
void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off);

/* Swap bytes and truncate a register word or half word */
void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width);

/* Validate JMP/JMP32 immediate range */
bool valid_jmp_i(u8 op, s32 imm);

/* Prepare a PC-relative jump operation with immediate conditional */
void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width,
                 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off);

/* Prepare a PC-relative jump operation with register conditional */
void setup_jmp_r(struct jit_context *ctx, bool same_reg,
                 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off);

/* Finish a PC-relative jump operation */
int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off);
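
/*
 * Rough flow for a conditional BPF_JMP with an immediate operand, as a
 * sketch only; the actual call sites live in the 32-bit and 64-bit JIT
 * implementations, and "toofar" is a hypothetical error label:
 *
 *      u8 jmp;
 *      s32 rel;
 *
 *      setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
 *      emit_jmp_i(ctx, dst, imm, rel, jmp);
 *      if (finish_jmp(ctx, jmp, off) < 0)
 *              goto toofar;
 */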

/* Conditional JMP/JMP32 immediate */
void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op);

/* Conditional JMP/JMP32 register */
void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op);

/* Jump always */
int emit_ja(struct jit_context *ctx, s16 off);

/* Jump to epilogue */
int emit_exit(struct jit_context *ctx);

/*
 * Build program prologue to set up the stack and registers.
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
void build_prologue(struct jit_context *ctx);

/*
 * Build the program epilogue to restore the stack and registers.
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
void build_epilogue(struct jit_context *ctx, int dest_reg);

/*
 * Convert an eBPF instruction into native instructions, i.e. JIT an
 * eBPF instruction.
 * Returns:
 *      0  - Successfully JITed an 8-byte eBPF instruction
 *      >0 - Successfully JITed a 16-byte eBPF instruction
 *           (a BPF_LD | BPF_DW | BPF_IMM load, which occupies two
 *           8-byte instruction slots)
 *      <0 - Failed to JIT
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
int build_insn(const struct bpf_insn *insn, struct jit_context *ctx);

#endif /* _BPF_JIT_COMP_H */