  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. #ifndef _ASM_POWERPC_CODE_PATCHING_H
  3. #define _ASM_POWERPC_CODE_PATCHING_H
  4. /*
  5. * Copyright 2008, Michael Ellerman, IBM Corporation.
  6. */
  7. #include <asm/types.h>
  8. #include <asm/ppc-opcode.h>
  9. #include <linux/string.h>
  10. #include <linux/kallsyms.h>
  11. #include <asm/asm-compat.h>
  12. #include <asm/inst.h>
/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
  19. #define BRANCH_SET_LINK 0x1
  20. #define BRANCH_ABSOLUTE 0x2
  21. DECLARE_STATIC_KEY_FALSE(init_mem_is_free);
/*
 * Powerpc branch instruction is :
 *
 *	0         6                 30   31
 *	+---------+----------------+---+---+
 *	| opcode  |       LI       |AA |LK |
 *	+---------+----------------+---+---+
 *	Where AA = 0 and LK = 0
 *
 * LI is a signed 24 bits integer. The real branch offset is computed
 * by: imm32 = SignExtend(LI:'0b00', 32);
 *
 * So the maximum forward branch should be:
 *	(0x007fffff << 2) = 0x01fffffc =  0x1fffffc
 * The maximum backward branch should be:
 *	(0xff800000 << 2) = 0xfe000000 = -0x2000000
 */
  39. static inline bool is_offset_in_branch_range(long offset)
  40. {
  41. return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
  42. }
  43. static inline bool is_offset_in_cond_branch_range(long offset)
  44. {
  45. return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
  46. }
  47. static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
  48. unsigned long target, int flags)
  49. {
  50. long offset;
  51. *instr = ppc_inst(0);
  52. offset = target;
  53. if (! (flags & BRANCH_ABSOLUTE))
  54. offset = offset - (unsigned long)addr;
  55. /* Check we can represent the target in the instruction format */
  56. if (!is_offset_in_branch_range(offset))
  57. return 1;
  58. /* Mask out the flags and target, so they don't step on each other. */
  59. *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));
  60. return 0;
  61. }
  62. int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
  63. unsigned long target, int flags);
  64. int patch_branch(u32 *addr, unsigned long target, int flags);
  65. int patch_instruction(u32 *addr, ppc_inst_t instr);
  66. int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
  67. static inline unsigned long patch_site_addr(s32 *site)
  68. {
  69. return (unsigned long)site + *site;
  70. }
  71. static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
  72. {
  73. return patch_instruction((u32 *)patch_site_addr(site), instr);
  74. }
  75. static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
  76. {
  77. return patch_branch((u32 *)patch_site_addr(site), target, flags);
  78. }
  79. static inline int modify_instruction(unsigned int *addr, unsigned int clr,
  80. unsigned int set)
  81. {
  82. return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
  83. }
  84. static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
  85. {
  86. return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
  87. }
  88. static inline unsigned int branch_opcode(ppc_inst_t instr)
  89. {
  90. return ppc_inst_primary_opcode(instr) & 0x3F;
  91. }
  92. static inline int instr_is_branch_iform(ppc_inst_t instr)
  93. {
  94. return branch_opcode(instr) == 18;
  95. }
  96. static inline int instr_is_branch_bform(ppc_inst_t instr)
  97. {
  98. return branch_opcode(instr) == 16;
  99. }
  100. int instr_is_relative_branch(ppc_inst_t instr);
  101. int instr_is_relative_link_branch(ppc_inst_t instr);
  102. unsigned long branch_target(const u32 *instr);
  103. int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
  104. bool is_conditional_branch(ppc_inst_t instr);
  105. #define OP_RT_RA_MASK 0xffff0000UL
  106. #define LIS_R2 (PPC_RAW_LIS(_R2, 0))
  107. #define ADDIS_R2_R12 (PPC_RAW_ADDIS(_R2, _R12, 0))
  108. #define ADDI_R2_R2 (PPC_RAW_ADDI(_R2, _R2, 0))
  109. static inline unsigned long ppc_function_entry(void *func)
  110. {
  111. #ifdef CONFIG_PPC64_ELF_ABI_V2
  112. u32 *insn = func;
  113. /*
  114. * A PPC64 ABIv2 function may have a local and a global entry
  115. * point. We need to use the local entry point when patching
  116. * functions, so identify and step over the global entry point
  117. * sequence.
  118. *
  119. * The global entry point sequence is always of the form:
  120. *
  121. * addis r2,r12,XXXX
  122. * addi r2,r2,XXXX
  123. *
  124. * A linker optimisation may convert the addis to lis:
  125. *
  126. * lis r2,XXXX
  127. * addi r2,r2,XXXX
  128. */
  129. if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
  130. ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
  131. ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
  132. return (unsigned long)(insn + 2);
  133. else
  134. return (unsigned long)func;
  135. #elif defined(CONFIG_PPC64_ELF_ABI_V1)
  136. /*
  137. * On PPC64 ABIv1 the function pointer actually points to the
  138. * function's descriptor. The first entry in the descriptor is the
  139. * address of the function text.
  140. */
  141. return ((struct func_desc *)func)->addr;
  142. #else
  143. return (unsigned long)func;
  144. #endif
  145. }
  146. static inline unsigned long ppc_global_function_entry(void *func)
  147. {
  148. #ifdef CONFIG_PPC64_ELF_ABI_V2
  149. /* PPC64 ABIv2 the global entry point is at the address */
  150. return (unsigned long)func;
  151. #else
  152. /* All other cases there is no change vs ppc_function_entry() */
  153. return ppc_function_entry(func);
  154. #endif
  155. }
  156. /*
  157. * Wrapper around kallsyms_lookup() to return function entry address:
  158. * - For ABIv1, we lookup the dot variant.
  159. * - For ABIv2, we return the local entry point.
  160. */
  161. static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
  162. {
  163. unsigned long addr;
  164. #ifdef CONFIG_PPC64_ELF_ABI_V1
  165. /* check for dot variant */
  166. char dot_name[1 + KSYM_NAME_LEN];
  167. bool dot_appended = false;
  168. if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
  169. return 0;
  170. if (name[0] != '.') {
  171. dot_name[0] = '.';
  172. dot_name[1] = '\0';
  173. strlcat(dot_name, name, sizeof(dot_name));
  174. dot_appended = true;
  175. } else {
  176. dot_name[0] = '\0';
  177. strlcat(dot_name, name, sizeof(dot_name));
  178. }
  179. addr = kallsyms_lookup_name(dot_name);
  180. if (!addr && dot_appended)
  181. /* Let's try the original non-dot symbol lookup */
  182. addr = kallsyms_lookup_name(name);
  183. #elif defined(CONFIG_PPC64_ELF_ABI_V2)
  184. addr = kallsyms_lookup_name(name);
  185. if (addr)
  186. addr = ppc_function_entry((void *)addr);
  187. #else
  188. addr = kallsyms_lookup_name(name);
  189. #endif
  190. return addr;
  191. }
  192. /*
  193. * Some instruction encodings commonly used in dynamic ftracing
  194. * and function live patching.
  195. */
  196. /* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
  197. #ifdef CONFIG_PPC64_ELF_ABI_V2
  198. #define R2_STACK_OFFSET 24
  199. #else
  200. #define R2_STACK_OFFSET 40
  201. #endif
  202. #define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
  203. /* usually preceded by a mflr r0 */
  204. #define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
  205. #endif /* _ASM_POWERPC_CODE_PATCHING_H */