/* arch/arc/kernel/process.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  4. *
  5. * Amit Bhor, Kanika Nema: Codito Technologies 2004
  6. */
  7. #include <linux/errno.h>
  8. #include <linux/module.h>
  9. #include <linux/sched.h>
  10. #include <linux/sched/task.h>
  11. #include <linux/sched/task_stack.h>
  12. #include <linux/mm.h>
  13. #include <linux/fs.h>
  14. #include <linux/unistd.h>
  15. #include <linux/ptrace.h>
  16. #include <linux/slab.h>
  17. #include <linux/syscalls.h>
  18. #include <linux/elf.h>
  19. #include <linux/tick.h>
  20. #include <asm/fpu.h>
  21. SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
  22. {
  23. task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
  24. return 0;
  25. }
  26. /*
  27. * We return the user space TLS data ptr as sys-call return code
  28. * Ideally it should be copy to user.
  29. * However we can cheat by the fact that some sys-calls do return
  30. * absurdly high values
  31. * Since the tls dat aptr is not going to be in range of 0xFFFF_xxxx
  32. * it won't be considered a sys-call error
  33. * and it will be loads better than copy-to-user, which is a definite
  34. * D-TLB Miss
  35. */
  36. SYSCALL_DEFINE0(arc_gettls)
  37. {
  38. return task_thread_info(current)->thr_ptr;
  39. }
/*
 * Userspace-visible compare-and-exchange emulation for cores without
 * LLOCK/SCOND instructions.
 *
 * Returns the value read from @uaddr; whether the exchange actually
 * happened is reported to userspace through the Z flag in status32
 * (set => *uaddr matched @expected and was replaced with @new).
 */
SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new)
{
	struct pt_regs *regs = current_pt_regs();
	u32 uval;
	int ret;

	/*
	 * This is only for old cores lacking LLOCK/SCOND, which by definition
	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
	 * And this also helps reduce the overhead for serializing in
	 * the UP case
	 */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

	/* Z indicates to userspace if operation succeeded */
	regs->status32 &= ~STATUS_Z_MASK;

	ret = access_ok(uaddr, sizeof(*uaddr));
	if (!ret)
		goto fail;

again:
	/* Disable preemption so the read-compare-write below is atomic on UP */
	preempt_disable();

	ret = __get_user(uval, uaddr);
	if (ret)
		goto fault;

	if (uval != expected)
		goto out;	/* mismatch: leave Z clear, return the read value */

	ret = __put_user(new, uaddr);
	if (ret)
		goto fault;

	/* Exchange succeeded: tell userspace via Z */
	regs->status32 |= STATUS_Z_MASK;

out:
	preempt_enable();
	return uval;

fault:
	preempt_enable();

	/* Only a plain page fault is recoverable; anything else is fatal */
	if (unlikely(ret != -EFAULT))
		goto fail;

	/* Fault the page in writable (can't fault with preemption disabled), then retry */
	mmap_read_lock(current->mm);
	ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(current->mm);

	if (likely(!ret))
		goto again;

fail:
	force_sig(SIGSEGV);
	return ret;
}
#ifdef CONFIG_ISA_ARCV2

/* Enter low-power state until the next interrupt (ARCv2 flavour) */
void arch_cpu_idle(void)
{
	/* Re-enable interrupts <= default irq priority before committing SLEEP */
	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

	__asm__ __volatile__(
		"sleep %0	\n"
		:
		:"I"(arg)); /* can't be "r" has to be embedded const */
}

#else	/* ARC700 */

/* Enter low-power state until the next interrupt (ARC700 flavour) */
void arch_cpu_idle(void)
{
	/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
	__asm__ __volatile__("sleep 0x3	\n");
}

#endif
  102. asmlinkage void ret_from_fork(void);
  103. /*
  104. * Copy architecture-specific thread state
  105. *
  106. * Layout of Child kernel mode stack as setup at the end of this function is
  107. *
  108. * | ... |
  109. * | ... |
  110. * | unused |
  111. * | |
  112. * ------------------
  113. * | r25 | <==== top of Stack (thread.ksp)
  114. * ~ ~
  115. * | --to-- | (CALLEE Regs of kernel mode)
  116. * | r13 |
  117. * ------------------
  118. * | fp |
  119. * | blink | @ret_from_fork
  120. * ------------------
  121. * | |
  122. * ~ ~
  123. * ~ ~
  124. * | |
  125. * ------------------
  126. * | r12 |
  127. * ~ ~
  128. * | --to-- | (scratch Regs of user mode)
  129. * | r0 |
  130. * ------------------
  131. * | SP |
  132. * | orig_r0 |
  133. * | event/ECR |
  134. * | user_r25 |
  135. * ------------------ <===== END of PAGE
  136. */
  137. int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
  138. {
  139. unsigned long clone_flags = args->flags;
  140. unsigned long usp = args->stack;
  141. unsigned long tls = args->tls;
  142. struct pt_regs *c_regs; /* child's pt_regs */
  143. unsigned long *childksp; /* to unwind out of __switch_to() */
  144. struct callee_regs *c_callee; /* child's callee regs */
  145. struct callee_regs *parent_callee; /* paren't callee */
  146. struct pt_regs *regs = current_pt_regs();
  147. /* Mark the specific anchors to begin with (see pic above) */
  148. c_regs = task_pt_regs(p);
  149. childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
  150. c_callee = ((struct callee_regs *)childksp) - 1;
  151. /*
  152. * __switch_to() uses thread.ksp to start unwinding stack
  153. * For kernel threads we don't need to create callee regs, the
  154. * stack layout nevertheless needs to remain the same.
  155. * Also, since __switch_to anyways unwinds callee regs, we use
  156. * this to populate kernel thread entry-pt/args into callee regs,
  157. * so that ret_from_kernel_thread() becomes simpler.
  158. */
  159. p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
  160. /* __switch_to expects FP(0), BLINK(return addr) at top */
  161. childksp[0] = 0; /* fp */
  162. childksp[1] = (unsigned long)ret_from_fork; /* blink */
  163. if (unlikely(args->fn)) {
  164. memset(c_regs, 0, sizeof(struct pt_regs));
  165. c_callee->r13 = (unsigned long)args->fn_arg;
  166. c_callee->r14 = (unsigned long)args->fn;
  167. return 0;
  168. }
  169. /*--------- User Task Only --------------*/
  170. /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
  171. childksp[0] = 0; /* for POP fp */
  172. childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
  173. /* Copy parents pt regs on child's kernel mode stack */
  174. *c_regs = *regs;
  175. if (usp)
  176. c_regs->sp = usp;
  177. c_regs->r0 = 0; /* fork returns 0 in child */
  178. parent_callee = ((struct callee_regs *)regs) - 1;
  179. *c_callee = *parent_callee;
  180. if (unlikely(clone_flags & CLONE_SETTLS)) {
  181. /*
  182. * set task's userland tls data ptr from 4th arg
  183. * clone C-lib call is difft from clone sys-call
  184. */
  185. task_thread_info(p)->thr_ptr = tls;
  186. } else {
  187. /* Normal fork case: set parent's TLS ptr in child */
  188. task_thread_info(p)->thr_ptr =
  189. task_thread_info(current)->thr_ptr;
  190. }
  191. /*
  192. * setup usermode thread pointer #1:
  193. * when child is picked by scheduler, __switch_to() uses @c_callee to
  194. * populate usermode callee regs: this works (despite being in a kernel
  195. * function) since special return path for child @ret_from_fork()
  196. * ensures those regs are not clobbered all the way to RTIE to usermode
  197. */
  198. c_callee->r25 = task_thread_info(p)->thr_ptr;
  199. #ifdef CONFIG_ARC_CURR_IN_REG
  200. /*
  201. * setup usermode thread pointer #2:
  202. * however for this special use of r25 in kernel, __switch_to() sets
  203. * r25 for kernel needs and only in the final return path is usermode
  204. * r25 setup, from pt_regs->user_r25. So set that up as well
  205. */
  206. c_regs->user_r25 = c_callee->r25;
  207. #endif
  208. return 0;
  209. }
  210. /*
  211. * Do necessary setup to start up a new user task
  212. */
  213. void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
  214. {
  215. regs->sp = usp;
  216. regs->ret = pc;
  217. /*
  218. * [U]ser Mode bit set
  219. * [L] ZOL loop inhibited to begin with - cleared by a LP insn
  220. * Interrupts enabled
  221. */
  222. regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;
  223. fpu_init_task(regs);
  224. /* bogus seed values for debugging */
  225. regs->lp_start = 0x10;
  226. regs->lp_end = 0x80;
  227. }
/*
 * Some archs flush debug and FPU info here
 */
void flush_thread(void)
{
	/* Nothing to do on ARC: no per-thread debug/FPU state flushed here */
}
  234. int elf_check_arch(const struct elf32_hdr *x)
  235. {
  236. unsigned int eflags;
  237. if (x->e_machine != EM_ARC_INUSE) {
  238. pr_err("ELF not built for %s ISA\n",
  239. is_isa_arcompact() ? "ARCompact":"ARCv2");
  240. return 0;
  241. }
  242. eflags = x->e_flags;
  243. if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
  244. pr_err("ABI mismatch - you need newer toolchain\n");
  245. force_fatal_sig(SIGSEGV);
  246. return 0;
  247. }
  248. return 1;
  249. }
  250. EXPORT_SYMBOL(elf_check_arch);