dsemul.c 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/err.h>
  3. #include <linux/slab.h>
  4. #include <linux/mm_types.h>
  5. #include <linux/sched/task.h>
  6. #include <asm/branch.h>
  7. #include <asm/cacheflush.h>
  8. #include <asm/fpu_emulator.h>
  9. #include <asm/inst.h>
  10. #include <asm/mipsregs.h>
  11. #include <linux/uaccess.h>
/**
 * struct emuframe - The 'emulation' frame structure
 * @emul: The instruction to 'emulate'.
 * @badinst: A break instruction to cause a return to the kernel.
 *
 * This structure defines the frames placed within the delay slot emulation
 * page in response to a call to mips_dsemul(). Each thread may be allocated
 * only one frame at any given time. The kernel stores within it the
 * instruction to be 'emulated' followed by a break instruction, then
 * executes the frame in user mode. The break causes a trap to the kernel
 * which leads to do_dsemulret() being called unless the instruction in
 * @emul causes a trap itself, is a branch, or a signal is delivered to
 * the thread. In these cases the allocated frame will either be reused by
 * a subsequent delay slot 'emulation', or be freed during signal delivery or
 * upon thread exit.
 *
 * This approach is used because:
 *
 * - Actually emulating all instructions isn't feasible. We would need to
 *   be able to handle instructions from all revisions of the MIPS ISA,
 *   all ASEs & all vendor instruction set extensions. This would be a
 *   whole lot of work & continual maintenance burden as new instructions
 *   are introduced, and in the case of some vendor extensions may not
 *   even be possible. Thus we need to take the approach of actually
 *   executing the instruction.
 *
 * - We must execute the instruction within user context. If we were to
 *   execute the instruction in kernel mode then it would have access to
 *   kernel resources without very careful checks, leaving us with a
 *   high potential for security or stability issues to arise.
 *
 * - We used to place the frame on the users stack, but this requires
 *   that the stack be executable. This is bad for security so the
 *   per-process page is now used instead.
 *
 * - The instruction in @emul may be something entirely invalid for a
 *   delay slot. The user may (intentionally or otherwise) place a branch
 *   in a delay slot, or a kernel mode instruction, or something else
 *   which generates an exception. Thus we can't rely upon the break in
 *   @badinst always being hit. For this reason we track the index of the
 *   frame allocated to each thread, allowing us to clean it up at later
 *   points such as signal delivery or thread exit.
 *
 * - The user may generate a fake struct emuframe if they wish, invoking
 *   the BRK_MEMU break instruction themselves. We must therefore not
 *   trust that BRK_MEMU means there's actually a valid frame allocated
 *   to the thread, and must not allow the user to do anything they
 *   couldn't already.
 */
struct emuframe {
	mips_instruction	emul;		/* instruction to execute in user mode */
	mips_instruction	badinst;	/* BRK_MEMU break, traps back to kernel */
};

/* Number of emuframes that fit within the single emulation page */
static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
  66. static inline __user struct emuframe *dsemul_page(void)
  67. {
  68. return (__user struct emuframe *)STACK_TOP;
  69. }
/*
 * Allocate a frame index from the current process' emulation page.
 *
 * Lazily allocates the per-mm allocation bitmap on first use (GFP_ATOMIC
 * because the bd_emupage_lock spinlock is held). If the page is full, the
 * caller sleeps until another thread frees a frame, then retries.
 *
 * Returns the allocated frame index, or BD_EMUFRAME_NONE if the bitmap
 * could not be allocated or a fatal signal was received whilst waiting.
 */
static int alloc_emuframe(void)
{
	mm_context_t *mm_ctx = &current->mm->context;
	int idx;

retry:
	spin_lock(&mm_ctx->bd_emupage_lock);

	/* Ensure we have an allocation bitmap */
	if (!mm_ctx->bd_emupage_allocmap) {
		mm_ctx->bd_emupage_allocmap = bitmap_zalloc(emupage_frame_count,
							    GFP_ATOMIC);

		if (!mm_ctx->bd_emupage_allocmap) {
			idx = BD_EMUFRAME_NONE;
			goto out_unlock;
		}
	}

	/* Attempt to allocate a single bit/frame */
	idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
				      emupage_frame_count, 0);
	if (idx < 0) {
		/*
		 * Failed to allocate a frame. We'll wait until one becomes
		 * available. We unlock the page so that other threads actually
		 * get the opportunity to free their frames, which means
		 * technically the result of bitmap_full may be incorrect.
		 * However the worst case is that we repeat all this and end up
		 * back here again.
		 */
		spin_unlock(&mm_ctx->bd_emupage_lock);
		if (!wait_event_killable(mm_ctx->bd_emupage_queue,
					 !bitmap_full(mm_ctx->bd_emupage_allocmap,
						      emupage_frame_count)))
			goto retry;

		/* Received a fatal signal - just give in */
		return BD_EMUFRAME_NONE;
	}

	/* Success! */
	pr_debug("allocate emuframe %d to %d\n", idx, current->pid);
out_unlock:
	spin_unlock(&mm_ctx->bd_emupage_lock);
	return idx;
}
  111. static void free_emuframe(int idx, struct mm_struct *mm)
  112. {
  113. mm_context_t *mm_ctx = &mm->context;
  114. spin_lock(&mm_ctx->bd_emupage_lock);
  115. pr_debug("free emuframe %d from %d\n", idx, current->pid);
  116. bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);
  117. /* If some thread is waiting for a frame, now's its chance */
  118. wake_up(&mm_ctx->bd_emupage_queue);
  119. spin_unlock(&mm_ctx->bd_emupage_lock);
  120. }
  121. static bool within_emuframe(struct pt_regs *regs)
  122. {
  123. unsigned long base = (unsigned long)dsemul_page();
  124. if (regs->cp0_epc < base)
  125. return false;
  126. if (regs->cp0_epc >= (base + PAGE_SIZE))
  127. return false;
  128. return true;
  129. }
/*
 * Free any emulation frame allocated to @tsk, eg. upon thread exit.
 *
 * The frame index is cleared with atomic_xchg() so that the frame can be
 * released at most once even if this races with another path clearing it.
 * task_lock() is taken to stabilise tsk->mm, which may already be NULL
 * (in which case there is no bitmap left to update).
 *
 * Returns true if a frame was allocated & freed, else false.
 */
bool dsemul_thread_cleanup(struct task_struct *tsk)
{
	int fr_idx;

	/* Clear any allocated frame, retrieving its index */
	fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);

	/* If no frame was allocated, we're done */
	if (fr_idx == BD_EMUFRAME_NONE)
		return false;

	task_lock(tsk);

	/* Free the frame that this thread had allocated */
	if (tsk->mm)
		free_emuframe(fr_idx, tsk->mm);

	task_unlock(tsk);
	return true;
}
/*
 * Rewind a thread that is currently executing from its emulation frame,
 * freeing the frame so the thread doesn't resume inside it (eg. during
 * signal delivery - see the struct emuframe description above).
 *
 * Returns true if the thread was executing from a frame it had allocated
 * and its PC was adjusted & the frame freed, else false.
 */
bool dsemul_thread_rollback(struct pt_regs *regs)
{
	struct emuframe __user *fr;
	int fr_idx;

	/* Do nothing if we're not executing from a frame */
	if (!within_emuframe(regs))
		return false;

	/* Find the frame being executed */
	fr_idx = atomic_read(&current->thread.bd_emu_frame);
	if (fr_idx == BD_EMUFRAME_NONE)
		return false;
	fr = &dsemul_page()[fr_idx];

	/*
	 * If the PC is at the emul instruction, roll back to the branch. If
	 * PC is at the badinst (break) instruction, we've already emulated the
	 * instruction so progress to the continue PC. If it's anything else
	 * then something is amiss & the user has branched into some other area
	 * of the emupage - we'll free the allocated frame anyway.
	 */
	/* msk_isa16_mode() strips the ISA mode bit before comparing PCs */
	if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
		regs->cp0_epc = current->thread.bd_emu_branch_pc;
	else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
		regs->cp0_epc = current->thread.bd_emu_cont_pc;

	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
	free_emuframe(fr_idx, current->mm);
	return true;
}
  172. void dsemul_mm_cleanup(struct mm_struct *mm)
  173. {
  174. mm_context_t *mm_ctx = &mm->context;
  175. bitmap_free(mm_ctx->bd_emupage_allocmap);
  176. }
/**
 * mips_dsemul() - 'Emulate' an instruction from a branch delay slot.
 * @regs: User thread register context.
 * @ir: The (32-bit encoded) instruction found in the delay slot.
 * @branch_pc: PC of the branch whose delay slot @ir occupies.
 * @cont_pc: PC to continue from once @ir has been executed.
 *
 * Sets up an emulation frame containing @ir followed by a break
 * instruction, writes it to the thread's emulation page and redirects
 * the thread to execute it in user mode (see struct emuframe above).
 *
 * Return: 0 if a frame was set up, -1 if @ir needed no frame (a NOP, or
 * a microMIPS instruction handled inline here), or SIGBUS if no frame
 * could be allocated or written.
 */
int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
		unsigned long branch_pc, unsigned long cont_pc)
{
	/* Non-zero if the thread is executing in the 16-bit (microMIPS) ISA mode */
	int isa16 = get_isa16_mode(regs->cp0_epc);
	mips_instruction break_math;
	unsigned long fr_uaddr;
	struct emuframe fr;
	int fr_idx, ret;

	/* NOP is easy */
	if (ir == 0)
		return -1;

	/* microMIPS instructions */
	if (isa16) {
		union mips_instruction insn = { .word = ir };

		/* NOP16 aka MOVE16 $0, $0 */
		if ((ir >> 16) == MM_NOP16)
			return -1;

		/* ADDIUPC */
		if (insn.mm_a_format.opcode == mm_addiupc_op) {
			unsigned int rs;
			s32 v;

			/*
			 * NOTE(review): presumably this maps the encoded rs
			 * field to the actual GPR number per the microMIPS
			 * ADDIUPC encoding - confirm against the ISA manual.
			 */
			rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
			v = regs->cp0_epc & ~3;
			v += insn.mm_a_format.simmediate << 2;
			regs->regs[rs] = (long)v;
			return -1;
		}
	}

	pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);

	/* Allocate a frame if we don't already have one */
	fr_idx = atomic_read(&current->thread.bd_emu_frame);
	if (fr_idx == BD_EMUFRAME_NONE)
		fr_idx = alloc_emuframe();
	if (fr_idx == BD_EMUFRAME_NONE)
		return SIGBUS;

	/* Retrieve the appropriately encoded break instruction */
	break_math = BREAK_MATH(isa16);

	/* Write the instructions to the frame */
	if (isa16) {
		/* Split each 32-bit word into halfwords, most-significant first */
		union mips_instruction _emul = {
			.halfword = { ir >> 16, ir }
		};
		union mips_instruction _badinst = {
			.halfword = { break_math >> 16, break_math }
		};

		fr.emul = _emul.word;
		fr.badinst = _badinst.word;
	} else {
		fr.emul = ir;
		fr.badinst = break_math;
	}

	/* Write the frame to user memory */
	fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
	ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
				FOLL_FORCE | FOLL_WRITE);
	if (unlikely(ret != sizeof(fr))) {
		MIPS_FPU_EMU_INC_STATS(errors);
		free_emuframe(fr_idx, current->mm);
		return SIGBUS;
	}

	/* Record the PC of the branch, PC to continue from & frame index */
	current->thread.bd_emu_branch_pc = branch_pc;
	current->thread.bd_emu_cont_pc = cont_pc;
	atomic_set(&current->thread.bd_emu_frame, fr_idx);

	/* Change user register context to execute the frame (keeping the ISA bit) */
	regs->cp0_epc = fr_uaddr | isa16;

	return 0;
}
  245. bool do_dsemulret(struct pt_regs *xcp)
  246. {
  247. /* Cleanup the allocated frame, returning if there wasn't one */
  248. if (!dsemul_thread_cleanup(current)) {
  249. MIPS_FPU_EMU_INC_STATS(errors);
  250. return false;
  251. }
  252. /* Set EPC to return to post-branch instruction */
  253. xcp->cp0_epc = current->thread.bd_emu_cont_pc;
  254. pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
  255. MIPS_FPU_EMU_INC_STATS(ds_emul);
  256. return true;
  257. }