bpf_jit.h (6.0 KB)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * bpf_jit.h: BPF JIT compiler for PPC
 *
 * Copyright 2011 Matt Evans <[email protected]>, IBM Corporation
 *	     2016 Naveen N. Rao <[email protected]>
 */
  8. #ifndef _BPF_JIT_H
  9. #define _BPF_JIT_H
  10. #ifndef __ASSEMBLY__
  11. #include <asm/types.h>
  12. #include <asm/ppc-opcode.h>
  13. #ifdef CONFIG_PPC64_ELF_ABI_V1
  14. #define FUNCTION_DESCR_SIZE 24
  15. #else
  16. #define FUNCTION_DESCR_SIZE 0
  17. #endif
  18. #define PLANT_INSTR(d, idx, instr) \
  19. do { if (d) { (d)[idx] = instr; } idx++; } while (0)
  20. #define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
  21. /* Long jump; (unconditional 'branch') */
  22. #define PPC_JMP(dest) \
  23. do { \
  24. long offset = (long)(dest) - (ctx->idx * 4); \
  25. if ((dest) != 0 && !is_offset_in_branch_range(offset)) { \
  26. pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
  27. return -ERANGE; \
  28. } \
  29. EMIT(PPC_RAW_BRANCH(offset)); \
  30. } while (0)
  31. /* bl (unconditional 'branch' with link) */
  32. #define PPC_BL(dest) EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))
  33. /* "cond" here covers BO:BI fields. */
  34. #define PPC_BCC_SHORT(cond, dest) \
  35. do { \
  36. long offset = (long)(dest) - (ctx->idx * 4); \
  37. if ((dest) != 0 && !is_offset_in_cond_branch_range(offset)) { \
  38. pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
  39. return -ERANGE; \
  40. } \
  41. EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
  42. } while (0)
  43. /* Sign-extended 32-bit immediate load */
  44. #define PPC_LI32(d, i) do { \
  45. if ((int)(uintptr_t)(i) >= -32768 && \
  46. (int)(uintptr_t)(i) < 32768) \
  47. EMIT(PPC_RAW_LI(d, i)); \
  48. else { \
  49. EMIT(PPC_RAW_LIS(d, IMM_H(i))); \
  50. if (IMM_L(i)) \
  51. EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \
  52. } } while(0)
  53. #ifdef CONFIG_PPC64
  54. #define PPC_LI64(d, i) do { \
  55. if ((long)(i) >= -2147483648 && \
  56. (long)(i) < 2147483648) \
  57. PPC_LI32(d, i); \
  58. else { \
  59. if (!((uintptr_t)(i) & 0xffff800000000000ULL)) \
  60. EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) & \
  61. 0xffff)); \
  62. else { \
  63. EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \
  64. if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
  65. EMIT(PPC_RAW_ORI(d, d, \
  66. ((uintptr_t)(i) >> 32) & 0xffff)); \
  67. } \
  68. EMIT(PPC_RAW_SLDI(d, d, 32)); \
  69. if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
  70. EMIT(PPC_RAW_ORIS(d, d, \
  71. ((uintptr_t)(i) >> 16) & 0xffff)); \
  72. if ((uintptr_t)(i) & 0x000000000000ffffULL) \
  73. EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & \
  74. 0xffff)); \
  75. } } while (0)
  76. #endif
  77. /*
  78. * The fly in the ointment of code size changing from pass to pass is
  79. * avoided by padding the short branch case with a NOP. If code size differs
  80. * with different branch reaches we will have the issue of code moving from
  81. * one pass to the next and will need a few passes to converge on a stable
  82. * state.
  83. */
  84. #define PPC_BCC(cond, dest) do { \
  85. if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
  86. PPC_BCC_SHORT(cond, dest); \
  87. EMIT(PPC_RAW_NOP()); \
  88. } else { \
  89. /* Flip the 'T or F' bit to invert comparison */ \
  90. PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \
  91. PPC_JMP(dest); \
  92. } } while(0)
  93. /* To create a branch condition, select a bit of cr0... */
  94. #define CR0_LT 0
  95. #define CR0_GT 1
  96. #define CR0_EQ 2
  97. /* ...and modify BO[3] */
  98. #define COND_CMP_TRUE 0x100
  99. #define COND_CMP_FALSE 0x000
  100. /* Together, they make all required comparisons: */
  101. #define COND_GT (CR0_GT | COND_CMP_TRUE)
  102. #define COND_GE (CR0_LT | COND_CMP_FALSE)
  103. #define COND_EQ (CR0_EQ | COND_CMP_TRUE)
  104. #define COND_NE (CR0_EQ | COND_CMP_FALSE)
  105. #define COND_LT (CR0_LT | COND_CMP_TRUE)
  106. #define COND_LE (CR0_GT | COND_CMP_FALSE)
  107. #define SEEN_FUNC 0x20000000 /* might call external helpers */
  108. #define SEEN_TAILCALL 0x40000000 /* uses tail calls */
  109. struct codegen_context {
  110. /*
  111. * This is used to track register usage as well
  112. * as calls to external helpers.
  113. * - register usage is tracked with corresponding
  114. * bits (r3-r31)
  115. * - rest of the bits can be used to track other
  116. * things -- for now, we use bits 0 to 2
  117. * encoded in SEEN_* macros above
  118. */
  119. unsigned int seen;
  120. unsigned int idx;
  121. unsigned int stack_size;
  122. int b2p[MAX_BPF_JIT_REG + 2];
  123. unsigned int exentry_idx;
  124. unsigned int alt_exit_addr;
  125. };
  126. #define bpf_to_ppc(r) (ctx->b2p[r])
  127. #ifdef CONFIG_PPC32
  128. #define BPF_FIXUP_LEN 3 /* Three instructions => 12 bytes */
  129. #else
  130. #define BPF_FIXUP_LEN 2 /* Two instructions => 8 bytes */
  131. #endif
  132. static inline void bpf_flush_icache(void *start, void *end)
  133. {
  134. smp_wmb(); /* smp write barrier */
  135. flush_icache_range((unsigned long)start, (unsigned long)end);
  136. }
  137. static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
  138. {
  139. return ctx->seen & (1 << (31 - i));
  140. }
  141. static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
  142. {
  143. ctx->seen |= 1 << (31 - i);
  144. }
  145. static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
  146. {
  147. ctx->seen &= ~(1 << (31 - i));
  148. }
  149. void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
  150. int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
  151. int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
  152. u32 *addrs, int pass);
  153. void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
  154. void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
  155. void bpf_jit_realloc_regs(struct codegen_context *ctx);
  156. int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);
  157. int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
  158. int insn_idx, int jmp_off, int dst_reg);
  159. #endif
  160. #endif