// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <[email protected]>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>

#include "access-helper.h"
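
/*
 * First-level exception entry stubs. They carry no C definitions here;
 * given the asmlinkage linkage, they are presumably implemented in
 * assembly and hand off to the matching do_*() handlers below.
 */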
extern asmlinkage void handle_ade(void);
extern asmlinkage void handle_ale(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_fpu(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_lbt(void);
extern asmlinkage void handle_lsx(void);
extern asmlinkage void handle_lasx(void);
extern asmlinkage void handle_reserved(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_vint(void);

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	if (user_mode(regs))
		state.type = UNWINDER_GUESS;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}
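
/*
 * Dump raw stack words (at most 40, stopping at a page boundary or at
 * the first faulting access), then print the symbolic backtrace.
 */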
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s        ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}
		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}
		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");

	show_backtrace(task, regs, loglvl, user);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current) {
			prepare_frametrace(&regs);
		} else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int excsubcode;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		pr_cont(" %0*lx", field, regs->regs[i]);
		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	/*
	 * Saved csr registers
	 */
	printk("era   : %0*lx %pS\n", field, regs->csr_era,
	       (void *) regs->csr_era);
	printk("ra    : %0*lx %pS\n", field, regs->regs[1],
	       (void *) regs->regs[1]);

	printk("CSR crmd: %08lx ", regs->csr_crmd);
	printk("CSR prmd: %08lx ", regs->csr_prmd);
	printk("CSR euen: %08lx ", regs->csr_euen);
	printk("CSR ecfg: %08lx ", regs->csr_ecfg);
	printk("CSR estat: %08lx ", regs->csr_estat);

	pr_cont("\n");

	exccode = ((regs->csr_estat) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	excsubcode = ((regs->csr_estat) & CSR_ESTAT_ESUBCODE) >> CSR_ESTAT_ESUBCODE_SHIFT;
	printk("ExcCode : %x (SubCode %x)\n", exccode, excsubcode);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk("BadVA : %0*lx\n", field, regs->csr_badvaddr);

	printk("PrId  : %08x (%s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(sig);
}
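
/*
 * Program the exception vector spacing. CSR.ECFG.VS holds the log2 of
 * the per-vector slot size in instructions, so e.g. a VECSIZE of 512
 * bytes gives vs = ilog2(512 / 4) = 7, i.e. 128 instructions per slot.
 */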
static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size/4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d is not supported yet", vs);

	csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
		    struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}
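
/*
 * Deliver the signal chosen by an FP fault path: returns 0 if no
 * signal was needed, nonzero after forcing one on the current task.
 */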
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */
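
/*
 * BUG()/WARN() are reached via a break instruction; report_bug() tells
 * us whether this was a WARN (skip the instruction and carry on) or a
 * genuine BUG (die in the kernel, SIGTRAP otherwise).
 */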
static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}
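
/*
 * Breakpoint exception. The low 15 bits of the trapping break
 * instruction carry a code that routes the trap to kprobes, uprobes,
 * BUG()/WARN(), or the plain SIGFPE/SIGTRAP cases below.
 */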
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();
	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = opcode & 0x7fff;	/* The break code is the low 15 bits */

	/*
	 * Notify the kprobe handlers if the instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	local_irq_disable();
	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	pr_warn("Hardware watch point handler not implemented!\n");
}

asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int opcode = 0;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}
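
/*
 * FPU-disabled exception: grab the FPU for the faulting user task,
 * initialising a fresh FP context on first use or restoring the
 * previously saved one otherwise.
 */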
asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

/* The LSX/LASX (SIMD) and LBT extensions are not handled yet; raise SIGILL. */
asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
	       read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}
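
/*
 * Vectored interrupt entry. If the interrupt arrived while on the task
 * stack, switch to this CPU's IRQ stack before dispatching, stashing
 * the task's sp at the top of the IRQ stack so the unwinder can walk
 * back across the switch. $s0 is callee-saved, so it survives the call
 * and lets us restore the original sp afterwards.
 */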
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}
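
/*
 * Exception vector area: 64 KiB-aligned storage with room for 128
 * VECSIZE-sized slots. The first 64 slots hold the architected
 * exception vectors; the TLB refill handler is installed at slot 80.
 */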
unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

static void configure_exception_vector(void)
{
	eentry = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80 * VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}
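
/*
 * Per-CPU trap setup: program the vector spacing and entry-point CSRs,
 * adopt init_mm, and (on the boot CPU only) point every vector at
 * handle_reserved until trap_init() installs the real handlers.
 */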
void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i < EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}