simulate-insn.c

// SPDX-License-Identifier: GPL-2.0+

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

#include "decode-insn.h"
#include "simulate-insn.h"
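
/*
 * The two accessors below read/write general-purpose register xN through a
 * flat view of struct pt_regs: on RISC-V, x1..x31 live at word offsets 1..31
 * of pt_regs (slot 0 holds the saved pc), so register N is simply word N.
 * x0 is hardwired to zero and is handled as a special case.
 */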
static inline bool rv_insn_reg_get_val(struct pt_regs *regs, u32 index,
				       unsigned long *ptr)
{
	if (index == 0)
		*ptr = 0;
	else if (index <= 31)
		*ptr = *((unsigned long *)regs + index);
	else
		return false;

	return true;
}

static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
				       unsigned long val)
{
	if (index == 0)
		return true;
	else if (index <= 31)
		*((unsigned long *)regs + index) = val;
	else
		return false;

	return true;
}
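
/*
 * Illustrative example (not part of the build): for a probed
 * "jal ra, +0x20" (opcode 0x020000ef) at addr 0x100, simulate_jal()
 * writes the return address 0x104 into ra and sets the saved pc to
 * 0x120, exactly as executing the instruction would have done.
 */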
bool __kprobes simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs)
{
	/*
	 *     31    30       21    20     19        12 11   7 6      0
	 * imm [20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode
	 *     1         10          1           8       5    JAL/J
	 */
	bool ret;
	u32 imm;
	u32 index = (opcode >> 7) & 0x1f;

	ret = rv_insn_reg_set_val(regs, index, addr + 4);
	if (!ret)
		return ret;

	imm  = ((opcode >> 21) & 0x3ff) << 1;
	imm |= ((opcode >> 20) & 0x1)   << 11;
	imm |= ((opcode >> 12) & 0xff)  << 12;
	imm |= ((opcode >> 31) & 0x1)   << 20;

	instruction_pointer_set(regs, addr + sign_extend32((imm), 20));

	return ret;
}
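
/*
 * JALR forms its target as rs1 + sext(offset) and, per the ISA spec, drops
 * bit 0 of the result; the "& ~1" below mirrors that.
 */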
bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs)
{
	/*
	 * 31          20 19 15 14 12 11 7 6      0
	 *  offset[11:0] | rs1 | 000 | rd | opcode
	 *       12         5     3    5    JALR/JR
	 */
	bool ret;
	unsigned long base_addr;
	u32 imm = (opcode >> 20) & 0xfff;
	u32 rd_index = (opcode >> 7) & 0x1f;
	u32 rs1_index = (opcode >> 15) & 0x1f;

	ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
	if (!ret)
		return ret;

	ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
	if (!ret)
		return ret;

	instruction_pointer_set(regs, (base_addr + sign_extend32((imm), 11)) & ~1);

	return ret;
}
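
/*
 * AUIPC's U-type immediate occupies bits 31:12 of the instruction. On RV64
 * the resulting 32-bit offset is sign-extended to 64 bits before being added
 * to the pc; on RV32 it already fills the register, so no extension is
 * needed.
 */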
#define auipc_rd_idx(opcode) \
	((opcode >> 7) & 0x1f)

#define auipc_imm(opcode) \
	((((opcode) >> 12) & 0xfffff) << 12)

#if __riscv_xlen == 64
#define auipc_offset(opcode)	sign_extend64(auipc_imm(opcode), 31)
#elif __riscv_xlen == 32
#define auipc_offset(opcode)	auipc_imm(opcode)
#else
#error "Unexpected __riscv_xlen"
#endif
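
/*
 * Illustrative example (not part of the build): "auipc a0, 0x1"
 * (opcode 0x00001517) probed at addr 0x1000 leaves 0x2000 in a0 and
 * advances the saved pc to 0x1004.
 */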
bool __kprobes simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *regs)
{
	/*
	 * auipc instruction:
	 *  31        12 11 7 6      0
	 * | imm[31:12] | rd | opcode |
	 *        20       5     7
	 */

	u32 rd_idx = auipc_rd_idx(opcode);
	unsigned long rd_val = addr + auipc_offset(opcode);

	if (!rv_insn_reg_set_val(regs, rd_idx, rd_val))
		return false;

	instruction_pointer_set(regs, addr + 4);

	return true;
}

#define branch_rs1_idx(opcode) \
	(((opcode) >> 15) & 0x1f)

#define branch_rs2_idx(opcode) \
	(((opcode) >> 20) & 0x1f)

#define branch_funct3(opcode) \
	(((opcode) >> 12) & 0x7)

#define branch_imm(opcode) \
	(((((opcode) >>  8) & 0xf ) <<  1) | \
	 ((((opcode) >> 25) & 0x3f) <<  5) | \
	 ((((opcode) >>  7) & 0x1 ) << 11) | \
	 ((((opcode) >> 31) & 0x1 ) << 12))

#define branch_offset(opcode) \
	sign_extend32((branch_imm(opcode)), 12)

#define BRANCH_BEQ	0x0
#define BRANCH_BNE	0x1
#define BRANCH_BLT	0x4
#define BRANCH_BGE	0x5
#define BRANCH_BLTU	0x6
#define BRANCH_BGEU	0x7
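
/*
 * A taken branch redirects the saved pc by the sign-extended, 2-byte-aligned
 * B-type offset; a not-taken branch simply falls through, which is expressed
 * below by using an offset of 4 (the instruction length).
 */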
bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs)
{
	/*
	 * branch instructions:
	 *      31    30       25 24 20 19 15 14    12 11       8    7      6      0
	 * | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1] | imm[11] | opcode |
	 *     1           6        5     5      3         4         1         7
	 *     imm[12|10:5]        rs2   rs1   000       imm[4:1|11]       1100011  BEQ
	 *     imm[12|10:5]        rs2   rs1   001       imm[4:1|11]       1100011  BNE
	 *     imm[12|10:5]        rs2   rs1   100       imm[4:1|11]       1100011  BLT
	 *     imm[12|10:5]        rs2   rs1   101       imm[4:1|11]       1100011  BGE
	 *     imm[12|10:5]        rs2   rs1   110       imm[4:1|11]       1100011  BLTU
	 *     imm[12|10:5]        rs2   rs1   111       imm[4:1|11]       1100011  BGEU
	 */
	s32 offset;
	s32 offset_tmp;
	unsigned long rs1_val;
	unsigned long rs2_val;

	if (!rv_insn_reg_get_val(regs, branch_rs1_idx(opcode), &rs1_val) ||
	    !rv_insn_reg_get_val(regs, branch_rs2_idx(opcode), &rs2_val))
		return false;

	offset_tmp = branch_offset(opcode);
	switch (branch_funct3(opcode)) {
	case BRANCH_BEQ:
		offset = (rs1_val == rs2_val) ? offset_tmp : 4;
		break;
	case BRANCH_BNE:
		offset = (rs1_val != rs2_val) ? offset_tmp : 4;
		break;
	case BRANCH_BLT:
		offset = ((long)rs1_val < (long)rs2_val) ? offset_tmp : 4;
		break;
	case BRANCH_BGE:
		offset = ((long)rs1_val >= (long)rs2_val) ? offset_tmp : 4;
		break;
	case BRANCH_BLTU:
		offset = (rs1_val < rs2_val) ? offset_tmp : 4;
		break;
	case BRANCH_BGEU:
		offset = (rs1_val >= rs2_val) ? offset_tmp : 4;
		break;
	default:
		return false;
	}

	instruction_pointer_set(regs, addr + offset);

	return true;
}
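
/*
 * Usage note (illustrative sketch, not part of this file): the kprobes
 * decoder in decode-insn.c matches a probed opcode and installs one of the
 * simulate_* helpers above as its simulation handler. Assuming the
 * riscv_insn_is_*() predicates from simulate-insn.h, such a dispatch looks
 * roughly like:
 *
 *	if (riscv_insn_is_jal(opcode))
 *		handler = simulate_jal;
 *	else if (riscv_insn_is_jalr(opcode))
 *		handler = simulate_jalr;
 *	else if (riscv_insn_is_auipc(opcode))
 *		handler = simulate_auipc;
 *	else if (riscv_insn_is_branch(opcode))
 *		handler = simulate_branch;
 */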