traps.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159
  1. /*
  2. * linux/arch/m68k/kernel/traps.c
  3. *
  4. * Copyright (C) 1993, 1994 by Hamish Macdonald
  5. *
  6. * 68040 fixes by Michael Rausch
  7. * 68040 fixes by Martin Apel
  8. * 68040 fixes and writeback by Richard Zidlicky
  9. * 68060 fixes by Roman Hodek
  10. * 68060 fixes by Jesper Skov
  11. *
  12. * This file is subject to the terms and conditions of the GNU General Public
  13. * License. See the file COPYING in the main directory of this archive
  14. * for more details.
  15. */
  16. /*
  17. * Sets up all exception vectors
  18. */
  19. #include <linux/sched.h>
  20. #include <linux/sched/debug.h>
  21. #include <linux/signal.h>
  22. #include <linux/kernel.h>
  23. #include <linux/mm.h>
  24. #include <linux/module.h>
  25. #include <linux/user.h>
  26. #include <linux/string.h>
  27. #include <linux/linkage.h>
  28. #include <linux/init.h>
  29. #include <linux/ptrace.h>
  30. #include <linux/kallsyms.h>
  31. #include <linux/extable.h>
  32. #include <asm/setup.h>
  33. #include <asm/fpu.h>
  34. #include <linux/uaccess.h>
  35. #include <asm/traps.h>
  36. #include <asm/machdep.h>
  37. #include <asm/processor.h>
  38. #include <asm/siginfo.h>
  39. #include <asm/tlbflush.h>
/*
 * Human-readable names for the m68k exception vectors, indexed by
 * vector number (the VEC_* constants from <asm/traps.h>).  Used when
 * reporting an unexpected trap to the log.
 */
static const char *vec_names[] = {
	[VEC_RESETSP]	= "RESET SP",
	[VEC_RESETPC]	= "RESET PC",
	[VEC_BUSERR]	= "BUS ERROR",
	[VEC_ADDRERR]	= "ADDRESS ERROR",
	[VEC_ILLEGAL]	= "ILLEGAL INSTRUCTION",
	[VEC_ZERODIV]	= "ZERO DIVIDE",
	[VEC_CHK]	= "CHK",
	[VEC_TRAP]	= "TRAPcc",
	[VEC_PRIV]	= "PRIVILEGE VIOLATION",
	[VEC_TRACE]	= "TRACE",
	[VEC_LINE10]	= "LINE 1010",
	[VEC_LINE11]	= "LINE 1111",
	[VEC_RESV12]	= "UNASSIGNED RESERVED 12",
	[VEC_COPROC]	= "COPROCESSOR PROTOCOL VIOLATION",
	[VEC_FORMAT]	= "FORMAT ERROR",
	[VEC_UNINT]	= "UNINITIALIZED INTERRUPT",
	[VEC_RESV16]	= "UNASSIGNED RESERVED 16",
	[VEC_RESV17]	= "UNASSIGNED RESERVED 17",
	[VEC_RESV18]	= "UNASSIGNED RESERVED 18",
	[VEC_RESV19]	= "UNASSIGNED RESERVED 19",
	[VEC_RESV20]	= "UNASSIGNED RESERVED 20",
	[VEC_RESV21]	= "UNASSIGNED RESERVED 21",
	[VEC_RESV22]	= "UNASSIGNED RESERVED 22",
	[VEC_RESV23]	= "UNASSIGNED RESERVED 23",
	[VEC_SPUR]	= "SPURIOUS INTERRUPT",
	[VEC_INT1]	= "LEVEL 1 INT",
	[VEC_INT2]	= "LEVEL 2 INT",
	[VEC_INT3]	= "LEVEL 3 INT",
	[VEC_INT4]	= "LEVEL 4 INT",
	[VEC_INT5]	= "LEVEL 5 INT",
	[VEC_INT6]	= "LEVEL 6 INT",
	[VEC_INT7]	= "LEVEL 7 INT",
	[VEC_SYS]	= "SYSCALL",
	[VEC_TRAP1]	= "TRAP #1",
	[VEC_TRAP2]	= "TRAP #2",
	[VEC_TRAP3]	= "TRAP #3",
	[VEC_TRAP4]	= "TRAP #4",
	[VEC_TRAP5]	= "TRAP #5",
	[VEC_TRAP6]	= "TRAP #6",
	[VEC_TRAP7]	= "TRAP #7",
	[VEC_TRAP8]	= "TRAP #8",
	[VEC_TRAP9]	= "TRAP #9",
	[VEC_TRAP10]	= "TRAP #10",
	[VEC_TRAP11]	= "TRAP #11",
	[VEC_TRAP12]	= "TRAP #12",
	[VEC_TRAP13]	= "TRAP #13",
	[VEC_TRAP14]	= "TRAP #14",
	[VEC_TRAP15]	= "TRAP #15",
	[VEC_FPBRUC]	= "FPCP BSUN",
	[VEC_FPIR]	= "FPCP INEXACT",
	[VEC_FPDIVZ]	= "FPCP DIV BY 0",
	[VEC_FPUNDER]	= "FPCP UNDERFLOW",
	[VEC_FPOE]	= "FPCP OPERAND ERROR",
	[VEC_FPOVER]	= "FPCP OVERFLOW",
	[VEC_FPNAN]	= "FPCP SNAN",
	[VEC_FPUNSUP]	= "FPCP UNSUPPORTED OPERATION",
	[VEC_MMUCFG]	= "MMU CONFIGURATION ERROR",
	[VEC_MMUILL]	= "MMU ILLEGAL OPERATION ERROR",
	[VEC_MMUACC]	= "MMU ACCESS LEVEL VIOLATION ERROR",
	[VEC_RESV59]	= "UNASSIGNED RESERVED 59",
	[VEC_UNIMPEA]	= "UNASSIGNED RESERVED 60",
	[VEC_UNIMPII]	= "UNASSIGNED RESERVED 61",
	[VEC_RESV62]	= "UNASSIGNED RESERVED 62",
	[VEC_RESV63]	= "UNASSIGNED RESERVED 63",
};
/*
 * Names for the 3-bit function-code / address-space values reported in
 * bus-error frames, indexed by function code.  Sun3 uses FC 3 as the
 * control space; on everything else FCs 0, 3 and 4 are unassigned.
 */
static const char *space_names[] = {
	[0]		= "Space 0",
	[USER_DATA]	= "User Data",
	[USER_PROGRAM]	= "User Program",
#ifndef CONFIG_SUN3
	[3]		= "Space 3",
#else
	[FC_CONTROL]	= "Control",
#endif
	[4]		= "Space 4",
	[SUPER_DATA]	= "Super Data",
	[SUPER_PROGRAM]	= "Super Program",
	[CPU_SPACE]	= "CPU"
};
/* Defined elsewhere in the m68k kernel; declared here for the handlers below. */
void die_if_kernel(char *,struct pt_regs *,int);
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);
int send_fault_sig(struct pt_regs *regs);
asmlinkage void trap_c(struct frame *fp);
#if defined (CONFIG_M68060)
/*
 * Handle a 68060 access error (format 4 frame).  Decodes the Fault
 * Status Long Word (stored in the frame's pc slot) and either clears
 * the branch cache, retries via do_page_fault(), delivers a fault
 * signal, or falls through to trap_c() for unrecoverable errors.
 */
static inline void access_error060 (struct frame *fp)
{
	unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */

	pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);

	if (fslw & MMU060_BPE) {
		/* branch prediction error -> clear branch cache */
		__asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
				      "orl #0x00400000,%/d0\n\t"
				      "movec %/d0,%/cacr"
				      : : : "d0" );
		/* return if there's no other error */
		if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
			return;
	}

	if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
		unsigned long errorcode;
		unsigned long addr = fp->un.fmt4.effaddr;

		/* misaligned access: round up to the containing page */
		if (fslw & MMU060_MA)
			addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;

		/* errorcode bit 0: protection fault; bit 1: write fault */
		errorcode = 1;
		if (fslw & MMU060_DESC_ERR) {
			/* descriptor error -> missing page, not protection;
			 * drop the stale ATC entry before retrying */
			__flush_tlb040_one(addr);
			errorcode = 0;
		}
		if (fslw & MMU060_W)
			errorcode |= 2;
		pr_debug("errorcode = %ld\n", errorcode);
		do_page_fault(&fp->ptregs, addr, errorcode);
	} else if (fslw & (MMU060_SEE)){
		/* Software Emulation Error.
		 * fault during mem_read/mem_write in ifpsp060/os.S
		 */
		send_fault_sig(&fp->ptregs);
	} else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
		   send_fault_sig(&fp->ptregs) > 0) {
		/* not a plain read/write error, or the fault signal could
		 * not be delivered to user space: report and trap */
		pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc,
			fp->un.fmt4.effaddr);
		pr_err("68060 access error, fslw=%lx\n", fslw);
		trap_c( fp );
	}
}
#endif /* CONFIG_M68060 */
#if defined (CONFIG_M68040)
/*
 * Probe the 68040 MMU translation for @addr and return the resulting
 * MMUSR value.  @iswrite selects ptestw vs ptestr; @wbs supplies the
 * function code of the original access (restored to USER_DATA after
 * the probe).
 */
static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
{
	unsigned long mmusr;

	/* probe in the same address space as the faulting access */
	set_fc(wbs);

	if (iswrite)
		asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
	else
		asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));

	asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));

	set_fc(USER_DATA);

	return mmusr;
}
/*
 * Perform one pending 68040 writeback described by writeback status
 * @wbs, address @wba and data @wbd.  The size field of the status word
 * selects a byte, word or long transfer.  Returns the put_user()
 * result: 0 on success, non-zero if the user access faulted.
 */
static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
				   unsigned long wbd)
{
	int res = 0;

	/* switch to the address space recorded in the writeback status */
	set_fc(wbs);

	switch (wbs & WBSIZ_040) {
	case BA_SIZE_BYTE:
		res = put_user(wbd & 0xff, (char __user *)wba);
		break;
	case BA_SIZE_WORD:
		res = put_user(wbd & 0xffff, (short __user *)wba);
		break;
	case BA_SIZE_LONG:
		res = put_user(wbd, (int __user *)wba);
		break;
	}

	set_fc(USER_DATA);

	pr_debug("do_040writeback1, res=%d\n", res);

	return res;
}
/* after an exception in a writeback the stack frame corresponding
 * to that exception is discarded, set a few bits in the old frame
 * to simulate what it should look like
 */
static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
{
	fp->un.fmt7.faddr = wba;
	fp->un.fmt7.ssw = wbs & 0xff;
	/* if the writeback address differs from the originally recorded
	 * fault address, mark the frame as a misaligned access */
	if (wba != current->thread.faddr)
		fp->un.fmt7.ssw |= MA_040;
}
/*
 * Complete the pending writebacks (wb2, wb3) recorded in a 68040
 * format 7 bus-error frame.  If a writeback faults, the frame is
 * fixed up (see fix_xframe040()) so the fault is reported against the
 * writeback address, and a fault signal is delivered at the end.
 */
static inline void do_040writebacks(struct frame *fp)
{
	int res = 0;
#if 0
	if (fp->un.fmt7.wb1s & WBV_040)
		pr_err("access_error040: cannot handle 1st writeback. oops.\n");
#endif

	/* writeback 2, unless it is a transfer to a translation table (WBTT) */
	if ((fp->un.fmt7.wb2s & WBV_040) &&
	    !(fp->un.fmt7.wb2s & WBTT_040)) {
		res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
				       fp->un.fmt7.wb2d);
		if (res)
			fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
		else
			fp->un.fmt7.wb2s = 0;
	}

	/* do the 2nd wb only if the first one was successful (except for a kernel wb) */
	if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
		res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
				       fp->un.fmt7.wb3d);
		if (res)
		{
			fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);

			/* move wb3 into the (now free) wb2 slot so it gets
			 * retried after the fault signal is handled */
			fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
			fp->un.fmt7.wb3s &= (~WBV_040);
			fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
			fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
		}
		else
			fp->un.fmt7.wb3s = 0;
	}

	if (res)
		send_fault_sig(&fp->ptregs);
}
/*
 * called from sigreturn(), must ensure userspace code didn't
 * manipulate exception frame to circumvent protection, then complete
 * pending writebacks
 * we just clear TM2 to turn it into a userspace access
 */
asmlinkage void berr_040cleanup(struct frame *fp)
{
	/* clear TM2 in both writeback status words */
	fp->un.fmt7.wb2s &= ~4;
	fp->un.fmt7.wb3s &= ~4;

	do_040writebacks(fp);
}
/*
 * Handle a 68040 access error (format 7 frame).  ATC faults are
 * resolved through do_page_fault() and any pending writebacks are then
 * completed; non-ATC faults are treated as bus errors (SIGBUS or a
 * kernel fixup), with writebacks to the faulting address disabled.
 */
static inline void access_error040(struct frame *fp)
{
	unsigned short ssw = fp->un.fmt7.ssw;
	unsigned long mmusr;

	pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
	pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
		fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
	pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
		fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
		fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);

	if (ssw & ATC_040) {
		unsigned long addr = fp->un.fmt7.faddr;
		unsigned long errorcode;

		/*
		 * The MMU status has to be determined AFTER the address
		 * has been corrected if there was a misaligned access (MA).
		 */
		if (ssw & MA_040)
			addr = (addr + 7) & -8;

		/* MMU error, get the MMUSR info for this access */
		mmusr = probe040(!(ssw & RW_040), addr, ssw);
		pr_debug("mmusr = %lx\n", mmusr);

		/* errorcode bit 0: protection fault; bit 1: write fault */
		errorcode = 1;
		if (!(mmusr & MMU_R_040)) {
			/* clear the invalid atc entry */
			__flush_tlb040_one(addr);
			errorcode = 0;
		}

		/* despite what documentation seems to say, RMW
		 * accesses have always both the LK and RW bits set */
		if (!(ssw & RW_040) || (ssw & LK_040))
			errorcode |= 2;

		if (do_page_fault(&fp->ptregs, addr, errorcode)) {
			pr_debug("do_page_fault() !=0\n");
			if (user_mode(&fp->ptregs)){
				/* delay writebacks after signal delivery */
				pr_debug(".. was usermode - return\n");
				return;
			}
			/* disable writeback into user space from kernel
			 * (if do_page_fault didn't fix the mapping,
			 * the writeback won't do good)
			 */
disable_wb:
			pr_debug(".. disabling wb2\n");
			if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
				fp->un.fmt7.wb2s &= ~WBV_040;
			if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
				fp->un.fmt7.wb3s &= ~WBV_040;
		}
	} else {
		/* In case of a bus error we either kill the process or expect
		 * the kernel to catch the fault, which then is also responsible
		 * for cleaning up the mess.
		 */
		current->thread.signo = SIGBUS;
		current->thread.faddr = fp->un.fmt7.faddr;
		if (send_fault_sig(&fp->ptregs) >= 0)
			pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
			       fp->un.fmt7.faddr);
		goto disable_wb;
	}

	do_040writebacks(fp);
}
#endif /* CONFIG_M68040 */
#if defined(CONFIG_SUN3)
#include <asm/sun3mmu.h>

extern int mmu_emu_handle_fault (unsigned long, int, int);

/* sun3 version of bus_error030 */

/*
 * Handle a 68030 bus error on Sun3 hardware.  First gives the MMU
 * emulation a chance to demand-map the page, then distinguishes
 * kernel-space faults (fatal unless fixable), user data faults
 * (forwarded to do_page_fault()), and instruction faults.
 */
static inline void bus_error030 (struct frame *fp)
{
	unsigned char buserr_type = sun3_get_buserr ();
	unsigned long addr, errorcode;
	unsigned short ssw = fp->un.fmtb.ssw;
	extern unsigned long _sun3_map_test_start, _sun3_map_test_end;

	if (ssw & (FC | FB))
		pr_debug("Instruction fault at %#010lx\n",
			ssw & FC ?
			fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
			:
			fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
	if (ssw & DF)
		pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
			ssw & RW ? "read" : "write",
			fp->un.fmtb.daddr,
			space_names[ssw & DFC], fp->ptregs.pc);

	/*
	 * Check if this page should be demand-mapped. This needs to go before
	 * the testing for a bad kernel-space access (demand-mapping applies
	 * to kernel accesses too).
	 */
	if ((ssw & DF)
	    && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
		if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
			return;
	}

	/* Check for kernel-space pagefault (BAD). */
	if (fp->ptregs.sr & PS_S) {
		/* kernel fault must be a data fault to user space */
		if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
			// try checking the kernel mappings before surrender
			if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
				return;
			/* instruction fault or kernel data fault! */
			if (ssw & (FC | FB))
				pr_err("Instruction fault at %#010lx\n",
					fp->ptregs.pc);
			if (ssw & DF) {
				/* was this fault incurred testing bus mappings? */
				if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
				   (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
					send_fault_sig(&fp->ptregs);
					return;
				}

				pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
					ssw & RW ? "read" : "write",
					fp->un.fmtb.daddr,
					space_names[ssw & DFC], fp->ptregs.pc);
			}
			pr_err("BAD KERNEL BUSERR\n");

			die_if_kernel("Oops", &fp->ptregs,0);
			force_sig(SIGKILL);
			return;
		}
	} else {
		/* user fault */
		if (!(ssw & (FC | FB)) && !(ssw & DF))
			/* not an instruction fault or data fault! BAD */
			panic ("USER BUSERR w/o instruction or data fault");
	}

	/* First handle the data fault, if any. */
	if (ssw & DF) {
		addr = fp->un.fmtb.daddr;

		// errorcode bit 0: 0 -> no page  1 -> protection fault
		// errorcode bit 1: 0 -> read fault  1 -> write fault
		// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
		// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
		if (buserr_type & SUN3_BUSERR_PROTERR)
			errorcode = 0x01;
		else if (buserr_type & SUN3_BUSERR_INVALID)
			errorcode = 0x00;
		else {
			pr_debug("*** unexpected busfault type=%#04x\n",
				 buserr_type);
			pr_debug("invalid %s access at %#lx from pc %#lx\n",
				 !(ssw & RW) ? "write" : "read", addr,
				 fp->ptregs.pc);
			die_if_kernel ("Oops", &fp->ptregs, buserr_type);
			force_sig (SIGBUS);
			return;
		}

		//todo: wtf is RM bit? --m
		if (!(ssw & RW) || ssw & RM)
			errorcode |= 0x02;

		/* Handle page fault. */
		do_page_fault (&fp->ptregs, addr, errorcode);

		/* Retry the data fault now. */
		return;
	}

	/* Now handle the instruction fault. */

	/* Get the fault address. */
	if (fp->ptregs.format == 0xA)
		addr = fp->ptregs.pc + 4;
	else
		addr = fp->un.fmtb.baddr;
	if (ssw & FC)
		addr -= 2;

	if (buserr_type & SUN3_BUSERR_INVALID) {
		if (!mmu_emu_handle_fault(addr, 1, 0))
			do_page_fault (&fp->ptregs, addr, 0);
	} else {
		pr_debug("protection fault on insn access (segv).\n");
		force_sig (SIGSEGV);
	}
}
#else
#if defined(CPU_M68020_OR_M68030)
/*
 * Handle a 68020/68030 bus error (format 0xa/0xb frame).  Probes the
 * MMU with ptest for the faulting data and/or instruction address,
 * forwards resolvable faults to do_page_fault(), and preloads an ATC
 * entry so the access can be retried.  Unrecoverable kernel faults
 * end in die_if_kernel()/SIGKILL.
 */
static inline void bus_error030 (struct frame *fp)
{
	volatile unsigned short temp;	/* pmove target; must live in memory */
	unsigned short mmusr;
	unsigned long addr, errorcode;
	unsigned short ssw = fp->un.fmtb.ssw;
#ifdef DEBUG
	unsigned long desc;
#endif

	pr_debug("pid = %x ", current->pid);
	pr_debug("SSW=%#06x ", ssw);

	if (ssw & (FC | FB))
		pr_debug("Instruction fault at %#010lx\n",
			ssw & FC ?
			fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
			:
			fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
	if (ssw & DF)
		pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
			ssw & RW ? "read" : "write",
			fp->un.fmtb.daddr,
			space_names[ssw & DFC], fp->ptregs.pc);

	/* ++andreas: If a data fault and an instruction fault happen
	   at the same time map in both pages. */

	/* First handle the data fault, if any. */
	if (ssw & DF) {
		addr = fp->un.fmtb.daddr;

#ifdef DEBUG
		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
			      "pmove %%psr,%1"
			      : "=a&" (desc), "=m" (temp)
			      : "a" (addr), "d" (ssw));
		pr_debug("mmusr is %#x for addr %#lx in task %p\n",
			 temp, addr, current);
		pr_debug("descriptor address is 0x%p, contents %#lx\n",
			 __va(desc), *(unsigned long *)__va(desc));
#else
		asm volatile ("ptestr %2,%1@,#7\n\t"
			      "pmove %%psr,%0"
			      : "=m" (temp) : "a" (addr), "d" (ssw));
#endif
		mmusr = temp;
		/* errorcode bit 0: protection fault; bit 1: write fault */
		errorcode = (mmusr & MMU_I) ? 0 : 1;
		if (!(ssw & RW) || (ssw & RM))
			errorcode |= 2;

		if (mmusr & (MMU_I | MMU_WP)) {
			/* We might have an exception table for this PC */
			if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
				pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
				       ssw & RW ? "read" : "write",
				       fp->un.fmtb.daddr,
				       space_names[ssw & DFC], fp->ptregs.pc);
				goto buserr;
			}
			/* Don't try to do anything further if an exception was
			   handled. */
			if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
				return;
		} else if (!(mmusr & MMU_I)) {
			/* probably a 020 cas fault */
			if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
				pr_err("unexpected bus error (%#x,%#x)\n", ssw,
				       mmusr);
		} else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
			pr_err("invalid %s access at %#lx from pc %#lx\n",
			       !(ssw & RW) ? "write" : "read", addr,
			       fp->ptregs.pc);
			die_if_kernel("Oops",&fp->ptregs,mmusr);
			force_sig(SIGSEGV);
			return;
		} else {
#if 0
			static volatile long tlong;
#endif

			pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
			       !(ssw & RW) ? "write" : "read", addr,
			       fp->ptregs.pc, ssw);

			/* re-probe at level 0 for diagnostics */
			asm volatile ("ptestr #1,%1@,#0\n\t"
				      "pmove %%psr,%0"
				      : "=m" (temp)
				      : "a" (addr));
			mmusr = temp;

			pr_err("level 0 mmusr is %#x\n", mmusr);
#if 0
			asm volatile ("pmove %%tt0,%0"
				      : "=m" (tlong));
			pr_debug("tt0 is %#lx, ", tlong);
			asm volatile ("pmove %%tt1,%0"
				      : "=m" (tlong));
			pr_debug("tt1 is %#lx\n", tlong);
#endif

			pr_debug("Unknown SIGSEGV - 1\n");
			die_if_kernel("Oops",&fp->ptregs,mmusr);
			force_sig(SIGSEGV);
			return;
		}

		/* setup an ATC entry for the access about to be retried */
		if (!(ssw & RW) || (ssw & RM))
			asm volatile ("ploadw %1,%0@" : /* no outputs */
				      : "a" (addr), "d" (ssw));
		else
			asm volatile ("ploadr %1,%0@" : /* no outputs */
				      : "a" (addr), "d" (ssw));
	}

	/* Now handle the instruction fault. */
	if (!(ssw & (FC|FB)))
		return;

	if (fp->ptregs.sr & PS_S) {
		pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc);
	buserr:
		pr_err("BAD KERNEL BUSERR\n");
		die_if_kernel("Oops",&fp->ptregs,0);
		force_sig(SIGKILL);
		return;
	}

	/* get the fault address */
	if (fp->ptregs.format == 10)
		addr = fp->ptregs.pc + 4;
	else
		addr = fp->un.fmtb.baddr;
	if (ssw & FC)
		addr -= 2;

	if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
		/* Insn fault on same page as data fault. But we
		   should still create the ATC entry. */
		goto create_atc_entry;

#ifdef DEBUG
	asm volatile ("ptestr #1,%2@,#7,%0\n\t"
		      "pmove %%psr,%1"
		      : "=a&" (desc), "=m" (temp)
		      : "a" (addr));
	pr_debug("mmusr is %#x for addr %#lx in task %p\n",
		 temp, addr, current);
	pr_debug("descriptor address is 0x%p, contents %#lx\n",
		 __va(desc), *(unsigned long *)__va(desc));
#else
	asm volatile ("ptestr #1,%1@,#7\n\t"
		      "pmove %%psr,%0"
		      : "=m" (temp) : "a" (addr));
#endif
	mmusr = temp;
	if (mmusr & MMU_I)
		do_page_fault (&fp->ptregs, addr, 0);
	else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
		pr_err("invalid insn access at %#lx from pc %#lx\n",
		       addr, fp->ptregs.pc);
		pr_debug("Unknown SIGSEGV - 2\n");
		die_if_kernel("Oops",&fp->ptregs,mmusr);
		force_sig(SIGSEGV);
		return;
	}

create_atc_entry:
	/* setup an ATC entry for the access about to be retried */
	asm volatile ("ploadr #2,%0@" : /* no outputs */
		      : "a" (addr));
}
#endif /* CPU_M68020_OR_M68030 */
#endif /* !CONFIG_SUN3 */
#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
#include <asm/mcfmmu.h>

/*
 * The following table converts the FS encoding of a ColdFire
 * exception stack frame into the error_code value needed by
 * do_fault.
 */
static const unsigned char fs_err_code[] = {
	0,  /* 0000 */
	0,  /* 0001 */
	0,  /* 0010 */
	0,  /* 0011 */
	1,  /* 0100 */
	0,  /* 0101 */
	0,  /* 0110 */
	0,  /* 0111 */
	2,  /* 1000 */
	3,  /* 1001 */
	2,  /* 1010 */
	0,  /* 1011 */
	1,  /* 1100 */
	1,  /* 1101 */
	0,  /* 1110 */
	0   /* 1111 */
};
/*
 * Handle a ColdFire access error.  @fs is the fault-status field
 * extracted from the exception frame vector word.  TLB misses are
 * first offered to the software TLB handler (cf_tlb_miss()); if that
 * cannot resolve them, or for protection/write errors, the fault is
 * passed on to do_page_fault() with an error code from fs_err_code[].
 */
static inline void access_errorcf(unsigned int fs, struct frame *fp)
{
	unsigned long mmusr, addr;
	unsigned int err_code;
	int need_page_fault;

	mmusr = mmu_read(MMUSR);
	addr = mmu_read(MMUAR);

	/*
	 * error_code:
	 *	bit 0 == 0 means no page found, 1 means protection fault
	 *	bit 1 == 0 means read, 1 means write
	 */
	switch (fs) {
	case 5:  /* 0101 TLB opword X miss */
		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
		addr = fp->ptregs.pc;
		break;
	case 6:  /* 0110 TLB extension word X miss */
		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
		addr = fp->ptregs.pc + sizeof(long);
		break;
	case 10: /* 1010 TLB W miss */
		need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
		break;
	case 14: /* 1110 TLB R miss */
		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
		break;
	default:
		/* 0000 Normal  */
		/* 0001 Reserved */
		/* 0010 Interrupt during debug service routine */
		/* 0011 Reserved */
		/* 0100 X Protection */
		/* 0111 IFP in emulator mode */
		/* 1000 W Protection*/
		/* 1001 Write error*/
		/* 1011 Reserved*/
		/* 1100 R Protection*/
		/* 1101 R Protection*/
		/* 1111 OEP in emulator mode*/
		need_page_fault = 1;
		break;
	}

	if (need_page_fault) {
		err_code = fs_err_code[fs];
		if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
			err_code |= 2; /* bit1 - write, bit0 - protection */
		do_page_fault(&fp->ptregs, addr, err_code);
	}
}
#endif /* CONFIG_COLDFIRE CONFIG_MMU */
/*
 * Common bus-error entry point, called from the low-level exception
 * code.  Dispatches to the CPU-specific access-error handler based on
 * the exception frame format (and, on ColdFire, the fault status).
 */
asmlinkage void buserr_c(struct frame *fp)
{
	/* Only set esp0 if coming from user mode */
	if (user_mode(&fp->ptregs))
		current->thread.esp0 = (unsigned long) fp;

	pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format);

#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
	if (CPU_IS_COLDFIRE) {
		unsigned int fs;

		/* fault status: bits [1:0] and [11:10] of the vector word */
		fs = (fp->ptregs.vector & 0x3) |
			((fp->ptregs.vector & 0xc00) >> 8);
		switch (fs) {
		case 0x5:
		case 0x6:
		case 0x7:
		case 0x9:
		case 0xa:
		case 0xd:
		case 0xe:
		case 0xf:
			access_errorcf(fs, fp);
			return;
		default:
			break;
		}
	}
#endif /* CONFIG_COLDFIRE && CONFIG_MMU */

	switch (fp->ptregs.format) {
#if defined (CONFIG_M68060)
	case 4:		/* 68060 access error */
		access_error060 (fp);
		break;
#endif
#if defined (CONFIG_M68040)
	case 0x7:	/* 68040 access error */
		access_error040 (fp);
		break;
#endif
#if defined (CPU_M68020_OR_M68030)
	case 0xa:
	case 0xb:
		bus_error030 (fp);
		break;
#endif
	default:
		die_if_kernel("bad frame format",&fp->ptregs,0);
		pr_debug("Unknown SIGSEGV - 4\n");
		force_sig(SIGSEGV);
	}
}
/* Maximum number of stack words show_stack() dumps. */
static int kstack_depth_to_print = 48;
/*
 * Scan the kernel stack from @stack up to the end of its
 * THREAD_SIZE-aligned area and print every word that looks like a
 * kernel text address as a possible call-trace entry.
 */
static void show_trace(unsigned long *stack, const char *loglvl)
{
	unsigned long *endstack;
	unsigned long addr;
	int i;

	printk("%sCall Trace:", loglvl);
	/* end of this stack area: round up to the THREAD_SIZE boundary */
	addr = (unsigned long)stack + THREAD_SIZE - 1;
	endstack = (unsigned long *)(addr & -THREAD_SIZE);
	i = 0;
	while (stack + 1 <= endstack) {
		addr = *stack++;
		/*
		 * If the address is either in the text segment of the
		 * kernel, or in the region which contains vmalloc'ed
		 * memory, it *may* be the address of a calling
		 * routine; if so, print it so that someone tracing
		 * down the cause of the crash will be able to figure
		 * out the call path that was taken.
		 */
		if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
			if (i % 5 == 0)
				pr_cont("\n ");
#endif
			pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
			i++;
		}
	}
	pr_cont("\n");
}
/*
 * Dump the CPU state for a fault/oops: loaded modules, PC, status
 * register, data/address registers, the format-specific part of the
 * exception frame, the stack above the frame, and the code bytes
 * around the faulting PC.
 */
void show_registers(struct pt_regs *regs)
{
	struct frame *fp = (struct frame *)regs;
	u16 c, *cp;
	unsigned long addr;
	int i;

	print_modules();
	pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
	pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
	pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
		regs->d0, regs->d1, regs->d2, regs->d3);
	pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
		regs->d4, regs->d5, regs->a0, regs->a1);
	pr_info("Process %s (pid: %d, task=%p)\n",
		current->comm, task_pid_nr(current), current);

	/* addr is advanced past the format-specific frame extension so it
	 * ends up pointing at the stack contents above the frame */
	addr = (unsigned long)&fp->un;
	pr_info("Frame format=%X ", regs->format);
	switch (regs->format) {
	case 0x2:
		pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr);
		addr += sizeof(fp->un.fmt2);
		break;
	case 0x3:
		pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr);
		addr += sizeof(fp->un.fmt3);
		break;
	case 0x4:
		/* on the 060 the "pc" slot of a format 4 frame holds the FSLW */
		if (CPU_IS_060)
			pr_cont("fault addr=%08lx fslw=%08lx\n",
				fp->un.fmt4.effaddr, fp->un.fmt4.pc);
		else
			pr_cont("eff addr=%08lx pc=%08lx\n",
				fp->un.fmt4.effaddr, fp->un.fmt4.pc);
		addr += sizeof(fp->un.fmt4);
		break;
	case 0x7:
		pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n",
			fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
		pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n",
			fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
		pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n",
			fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
		pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n",
			fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
		pr_info("push data: %08lx %08lx %08lx %08lx\n",
			fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
			fp->un.fmt7.pd3);
		addr += sizeof(fp->un.fmt7);
		break;
	case 0x9:
		pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr);
		addr += sizeof(fp->un.fmt9);
		break;
	case 0xa:
		pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
			fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
			fp->un.fmta.daddr, fp->un.fmta.dobuf);
		addr += sizeof(fp->un.fmta);
		break;
	case 0xb:
		pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
			fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
			fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
		pr_info("baddr=%08lx dibuf=%08lx ver=%x\n",
			fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
		addr += sizeof(fp->un.fmtb);
		break;
	default:
		pr_cont("\n");
	}
	show_stack(NULL, (unsigned long *)addr, KERN_INFO);

	/* dump 8 words before and 16 after the PC; stop on a bad fetch at
	 * or past the PC itself (i >= 0) */
	pr_info("Code:");
	cp = (u16 *)regs->pc;
	for (i = -8; i < 16; i++) {
		if (get_kernel_nofault(c, cp + i) && i >= 0) {
			pr_cont(" Bad PC value.");
			break;
		}
		if (i)
			pr_cont(" %04x", c);
		else
			pr_cont(" <%04x>", c);	/* mark the word at the PC */
	}
	pr_cont("\n");
}
  835. void show_stack(struct task_struct *task, unsigned long *stack,
  836. const char *loglvl)
  837. {
  838. unsigned long *p;
  839. unsigned long *endstack;
  840. int i;
  841. if (!stack) {
  842. if (task)
  843. stack = (unsigned long *)task->thread.esp0;
  844. else
  845. stack = (unsigned long *)&stack;
  846. }
  847. endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
  848. printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
  849. p = stack;
  850. for (i = 0; i < kstack_depth_to_print; i++) {
  851. if (p + 1 > endstack)
  852. break;
  853. if (i % 8 == 0)
  854. pr_cont("\n ");
  855. pr_cont(" %08lx", *p++);
  856. }
  857. pr_cont("\n");
  858. show_trace(stack, loglvl);
  859. }
  860. /*
  861. * The vector number returned in the frame pointer may also contain
  862. * the "fs" (Fault Status) bits on ColdFire. These are in the bottom
  863. * 2 bits, and upper 2 bits. So we need to mask out the real vector
  864. * number before using it in comparisons. You don't need to do this on
  865. * real 68k parts, but it won't hurt either.
  866. */
/*
 * Report a fatal exception taken in supervisor mode, then die via
 * die_if_kernel().
 */
void bad_super_trap (struct frame *fp)
{
	/* Mask out ColdFire fault-status bits to get the real vector. */
	int vector = (fp->ptregs.vector >> 2) & 0xff;

	console_verbose();
	/* Name the exception if we have a table entry for this vector. */
	if (vector < ARRAY_SIZE(vec_names))
		pr_err("*** %s *** FORMAT=%X\n",
			vec_names[vector],
			fp->ptregs.format);
	else
		pr_err("*** Exception %d *** FORMAT=%X\n",
			vector, fp->ptregs.format);
	/*
	 * Address errors on 68020/030 come with a bus-fault frame:
	 * decode its special status word (SSW) to pinpoint the fault.
	 */
	if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) {
		unsigned short ssw = fp->un.fmtb.ssw;

		pr_err("SSW=%#06x ", ssw);
		/* Instruction-pipe stage C fault. */
		if (ssw & RC)
			pr_err("Pipe stage C instruction fault at %#010lx\n",
				(fp->ptregs.format) == 0xA ?
				fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
		/* Instruction-pipe stage B fault. */
		if (ssw & RB)
			pr_err("Pipe stage B instruction fault at %#010lx\n",
				(fp->ptregs.format) == 0xA ?
				fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
		/* Data access fault; RW distinguishes read from write. */
		if (ssw & DF)
			pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
				ssw & RW ? "read" : "write",
				fp->un.fmtb.daddr, space_names[ssw & DFC],
				fp->ptregs.pc);
	}
	pr_err("Current process id is %d\n", task_pid_nr(current));
	die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
}
/*
 * Generic C entry point for processor exceptions.
 *
 * Supervisor-mode faults are either ignored (trace of a trapping
 * instruction), resolved through the exception fixup table (MMU
 * kernels), or reported as fatal via bad_super_trap().  User-mode
 * faults are translated into a signal with an appropriate siginfo
 * code and fault address and delivered to the current process.
 */
asmlinkage void trap_c(struct frame *fp)
{
	int sig, si_code;
	void __user *addr;
	/* Mask out ColdFire fault-status bits to get the real vector. */
	int vector = (fp->ptregs.vector >> 2) & 0xff;

	if (fp->ptregs.sr & PS_S) {
		if (vector == VEC_TRACE) {
			/* traced a trapping instruction on a 68020/30,
			 * real exception will be executed afterwards.
			 */
			return;
		}
#ifdef CONFIG_MMU
		if (fixup_exception(&fp->ptregs))
			return;
#endif
		bad_super_trap(fp);
		return;
	}

	/* send the appropriate signal to the user program */
	switch (vector) {
	case VEC_ADDRERR:
		si_code = BUS_ADRALN;
		sig = SIGBUS;
		break;
	case VEC_ILLEGAL:
	case VEC_LINE10:
	case VEC_LINE11:
		si_code = ILL_ILLOPC;
		sig = SIGILL;
		break;
	case VEC_PRIV:
		si_code = ILL_PRVOPC;
		sig = SIGILL;
		break;
	case VEC_COPROC:
		si_code = ILL_COPROC;
		sig = SIGILL;
		break;
	/* TRAP #1..#14 instructions: illegal trap. */
	case VEC_TRAP1:
	case VEC_TRAP2:
	case VEC_TRAP3:
	case VEC_TRAP4:
	case VEC_TRAP5:
	case VEC_TRAP6:
	case VEC_TRAP7:
	case VEC_TRAP8:
	case VEC_TRAP9:
	case VEC_TRAP10:
	case VEC_TRAP11:
	case VEC_TRAP12:
	case VEC_TRAP13:
	case VEC_TRAP14:
		si_code = ILL_ILLTRP;
		sig = SIGILL;
		break;
	/* Floating-point exceptions map to SIGFPE with specific codes. */
	case VEC_FPBRUC:
	case VEC_FPOE:
	case VEC_FPNAN:
		si_code = FPE_FLTINV;
		sig = SIGFPE;
		break;
	case VEC_FPIR:
		si_code = FPE_FLTRES;
		sig = SIGFPE;
		break;
	case VEC_FPDIVZ:
		si_code = FPE_FLTDIV;
		sig = SIGFPE;
		break;
	case VEC_FPUNDER:
		si_code = FPE_FLTUND;
		sig = SIGFPE;
		break;
	case VEC_FPOVER:
		si_code = FPE_FLTOVF;
		sig = SIGFPE;
		break;
	case VEC_ZERODIV:
		si_code = FPE_INTDIV;
		sig = SIGFPE;
		break;
	case VEC_CHK:
	case VEC_TRAP:
		si_code = FPE_INTOVF;
		sig = SIGFPE;
		break;
	case VEC_TRACE:		/* ptrace single step */
		si_code = TRAP_TRACE;
		sig = SIGTRAP;
		break;
	case VEC_TRAP15:	/* breakpoint */
		si_code = TRAP_BRKPT;
		sig = SIGTRAP;
		break;
	default:
		si_code = ILL_ILLOPC;
		sig = SIGILL;
		break;
	}

	/*
	 * Pick the most precise fault address available for this stack
	 * frame format; formats with no extra address info fall back to
	 * the program counter.
	 */
	switch (fp->ptregs.format) {
	default:
		addr = (void __user *) fp->ptregs.pc;
		break;
	case 2:
		addr = (void __user *) fp->un.fmt2.iaddr;
		break;
	case 7:
		addr = (void __user *) fp->un.fmt7.effaddr;
		break;
	case 9:
		addr = (void __user *) fp->un.fmt9.iaddr;
		break;
	case 10:
		addr = (void __user *) fp->un.fmta.daddr;
		break;
	case 11:
		addr = (void __user*) fp->un.fmtb.daddr;
		break;
	}
	force_sig_fault(sig, si_code, addr);
}
  1020. void die_if_kernel (char *str, struct pt_regs *fp, int nr)
  1021. {
  1022. if (!(fp->sr & PS_S))
  1023. return;
  1024. console_verbose();
  1025. pr_crit("%s: %08x\n", str, nr);
  1026. show_registers(fp);
  1027. add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
  1028. make_task_dead(SIGSEGV);
  1029. }
/*
 * Record @ssp as the current task's saved supervisor stack pointer
 * (thread.esp0).  NOTE(review): called from assembly entry code —
 * exact caller not visible in this file chunk.
 */
asmlinkage void set_esp0(unsigned long ssp)
{
	current->thread.esp0 = ssp;
}
  1034. /*
  1035. * This function is called if an error occur while accessing
  1036. * user-space from the fpsp040 code.
  1037. */
  1038. asmlinkage void fpsp040_die(void)
  1039. {
  1040. force_exit_sig(SIGSEGV);
  1041. }
#ifdef CONFIG_M68KFPU_EMU
/*
 * Deliver a fault signal on behalf of the in-kernel FPU emulator.
 *
 * @signal: signal number to send to the current task
 * @code:   siginfo si_code value
 * @addr:   fault address recorded in the siginfo
 */
asmlinkage void fpemu_signal(int signal, int code, void *addr)
{
	force_sig_fault(signal, code, addr);
}
#endif