unaligned.c
  1. /*
  2. * Handle unaligned accesses by emulation.
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
  9. * Copyright (C) 1999 Silicon Graphics, Inc.
  10. * Copyright (C) 2014 Imagination Technologies Ltd.
  11. *
  12. * This file contains exception handler for address error exception with the
  13. * special capability to execute faulting instructions in software. The
  14. * handler does not try to handle the case when the program counter points
  15. * to an address not aligned to a word boundary.
  16. *
  17. * Putting data to unaligned addresses is a bad practice even on Intel where
  18. * only the performance is affected. Much worse is that such code is non-
  19. * portable. Due to several programs that die on MIPS due to alignment
  20. * problems I decided to implement this handler anyway though I originally
  21. * didn't intend to do this at all for user code.
  22. *
  23. * For now I enable fixing of address errors by default to make life easier.
  24. * I however intend to disable this at some point in the future when the alignment
  25. * problems with user programs have been fixed. For programmers this is the
  26. * right way to go.
  27. *
  28. * Fixing address errors is a per process option. The option is inherited
  29. * across fork(2) and execve(2) calls. If you really want to use the
  30. * option in your user programs - I discourage the use of the software
  31. * emulation strongly - use the following code in your userland stuff:
  32. *
  33. * #include <sys/sysmips.h>
  34. *
  35. * ...
  36. * sysmips(MIPS_FIXADE, x);
  37. * ...
  38. *
  39. * The argument x is 0 for disabling software emulation, enabled otherwise.
  40. *
  41. * Below a little program to play around with this feature.
  42. *
  43. * #include <stdio.h>
  44. * #include <sys/sysmips.h>
  45. *
  46. * struct foo {
  47. * unsigned char bar[8];
  48. * };
  49. *
  50. * main(int argc, char *argv[])
  51. * {
  52. * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
  53. * unsigned int *p = (unsigned int *) (x.bar + 3);
  54. * int i;
  55. *
  56. * if (argc > 1)
  57. * sysmips(MIPS_FIXADE, atoi(argv[1]));
  58. *
  59. * printf("*p = %08lx\n", *p);
  60. *
  61. * *p = 0xdeadface;
  62. *
  63. * for(i = 0; i <= 7; i++)
  64. * printf("%02x ", x.bar[i]);
  65. * printf("\n");
  66. * }
  67. *
  68. * Coprocessor loads are not supported; I think this case is unimportant
  69. * in practice.
  70. *
  71. * TODO: Handle ndc (attempted store to doubleword in uncached memory)
  72. * exception for the R6000.
  73. * A store crossing a page boundary might be executed only partially.
  74. * Undo the partial store in this case.
  75. */
  76. #include <linux/context_tracking.h>
  77. #include <linux/mm.h>
  78. #include <linux/signal.h>
  79. #include <linux/smp.h>
  80. #include <linux/sched.h>
  81. #include <linux/debugfs.h>
  82. #include <linux/perf_event.h>
  83. #include <asm/asm.h>
  84. #include <asm/branch.h>
  85. #include <asm/byteorder.h>
  86. #include <asm/cop2.h>
  87. #include <asm/debug.h>
  88. #include <asm/fpu.h>
  89. #include <asm/fpu_emulator.h>
  90. #include <asm/inst.h>
  91. #include <asm/unaligned-emul.h>
  92. #include <asm/mmu_context.h>
  93. #include <linux/uaccess.h>
  94. #include "access-helper.h"
/* Policy selecting how an emulated unaligned access is reported. */
enum {
	UNALIGNED_ACTION_QUIET,		/* fix up silently */
	UNALIGNED_ACTION_SIGNAL,	/* deliver a signal instead of fixing up */
	UNALIGNED_ACTION_SHOW,		/* fix up, but also dump register state */
};
#ifdef CONFIG_DEBUG_FS
/* Emulation counter and action knob, exposed through debugfs. */
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
/* Without debugfs there is no knob; always fix up quietly. */
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
  106. extern void show_registers(struct pt_regs *regs);
/*
 * Emulate a classic-MIPS (32-bit encoding) load/store that raised an
 * address error exception, completing the access in software.
 *
 * @regs: trapping task's register file; on success the destination GPR
 *        (for loads) and the post-instruction EPC are updated.
 * @addr: the unaligned data address that faulted.
 * @pc:   address of the faulting instruction word.
 *
 * On failure the task receives SIGBUS (bad address / unemulatable op),
 * SIGSEGV (access fault with no fixup) or SIGILL (unknown instruction);
 * for kernel-mode faults die_if_kernel() fires first.  Any branch state
 * already advanced by compute_return_epc() is rolled back on a fault so
 * the instruction can be restarted or reported at its original EPC.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int *pc)
{
	unsigned long origpc, orig31, value;
	union mips_instruction insn;
	unsigned int res;
	bool user = user_mode(regs);

	/* Saved up front so the fault path can undo a taken jump/branch. */
	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 * NOTE(review): historical claim — __get_inst32() can fail and its
	 * return value is not checked here; confirm against the helper.
	 */
	__get_inst32(&insn.word, pc, user);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of
	 * interest.
	 */
	case spec3_op:
		if (insn.dsp_format.func == lx_op) {
			/* DSP ASE indexed loads (lwx/lhx). */
			switch (insn.dsp_format.op) {
			case lwx_op:
				if (user && !access_ok(addr, 4))
					goto sigbus;
				LoadW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			case lhx_op:
				if (user && !access_ok(addr, 2))
					goto sigbus;
				LoadHW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			default:
				goto sigill;
			}
		}
#ifdef CONFIG_EVA
		else {
			/*
			 * we can land here only from kernel accessing user
			 * memory, so we need to "switch" the address limit to
			 * user space, so that address check can work properly.
			 */
			switch (insn.spec3_format.func) {
			case lhe_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHWE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lwe_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				LoadWE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lhue_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHWUE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case she_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				/*
				 * For stores the EPC is advanced before the
				 * access; a fault rolls it back below.
				 */
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreHWE(addr, value, res);
				if (res)
					goto fault;
				break;
			case swe_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreWE(addr, value, res);
				if (res)
					goto fault;
				break;
			default:
				goto sigill;
			}
		}
#endif
		break;
	case lh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadHWE(addr, value, res);
		else
			LoadHW(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadWE(addr, value, res);
		else
			LoadW(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadHWUE(addr, value, res);
		else
			LoadHWU(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA) && user)
			StoreHWE(addr, value, res);
		else
			StoreHW(addr, value, res);

		if (res)
			goto fault;
		break;

	case sw_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA) && user)
			StoreWE(addr, value, res);
		else
			StoreW(addr, value, res);

		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
	case cop1x_op: {
		void __user *fault_addr = NULL;

		/* FP loads/stores are handed to the full FPU emulator. */
		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA
	case msa_op: {
		unsigned int wd, preempted;
		enum msa_2b_fmt df;
		union fpureg *fpr;

		if (!cpu_has_msa)
			goto sigill;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];

		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			do {
				/*
				 * If we have live MSA context keep track of
				 * whether we get preempted in order to avoid
				 * the register context we load being clobbered
				 * by the live context as it's saved during
				 * preemption.  If we don't have live context
				 * then it can't be saved to clobber the value
				 * we load.
				 */
				preempted = test_thread_flag(TIF_USEDMSA);

				res = __copy_from_user_inatomic(fpr, addr,
								sizeof(*fpr));
				if (res)
					goto fault;

				/*
				 * Update the hardware register if it is in use
				 * by the task in this quantum, in order to
				 * avoid having to save & restore the whole
				 * vector context.
				 */
				preempt_disable();
				if (test_thread_flag(TIF_USEDMSA)) {
					write_msa_wr(wd, fpr, df);
					preempted = 0;
				}
				preempt_enable();
			} while (preempted);
			break;

		case msa_st_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;
	}
#endif /* CONFIG_CPU_HAS_MSA */

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
/*
 * Recode table from 16-bit register notation to 32-bit GPR.
 * Index is the 3-bit register field of a 16-bit microMIPS instruction;
 * the value is the corresponding full GPR number ($16,$17,$2..$7).
 * NOTE(review): non-static, presumably referenced from other MIPS code —
 * confirm before changing linkage.
 */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/*
 * Recode table from 16-bit STORE register notation to 32-bit GPR.
 * Differs from reg16to32[] only in slot 0, which maps to $zero.
 */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
  503. static void emulate_load_store_microMIPS(struct pt_regs *regs,
  504. void __user *addr)
  505. {
  506. unsigned long value;
  507. unsigned int res;
  508. int i;
  509. unsigned int reg = 0, rvar;
  510. unsigned long orig31;
  511. u16 __user *pc16;
  512. u16 halfword;
  513. unsigned int word;
  514. unsigned long origpc, contpc;
  515. union mips_instruction insn;
  516. struct mm_decoded_insn mminsn;
  517. bool user = user_mode(regs);
  518. origpc = regs->cp0_epc;
  519. orig31 = regs->regs[31];
  520. mminsn.micro_mips_mode = 1;
  521. /*
  522. * This load never faults.
  523. */
  524. pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
  525. __get_user(halfword, pc16);
  526. pc16++;
  527. contpc = regs->cp0_epc + 2;
  528. word = ((unsigned int)halfword << 16);
  529. mminsn.pc_inc = 2;
  530. if (!mm_insn_16bit(halfword)) {
  531. __get_user(halfword, pc16);
  532. pc16++;
  533. contpc = regs->cp0_epc + 4;
  534. mminsn.pc_inc = 4;
  535. word |= halfword;
  536. }
  537. mminsn.insn = word;
  538. if (get_user(halfword, pc16))
  539. goto fault;
  540. mminsn.next_pc_inc = 2;
  541. word = ((unsigned int)halfword << 16);
  542. if (!mm_insn_16bit(halfword)) {
  543. pc16++;
  544. if (get_user(halfword, pc16))
  545. goto fault;
  546. mminsn.next_pc_inc = 4;
  547. word |= halfword;
  548. }
  549. mminsn.next_insn = word;
  550. insn = (union mips_instruction)(mminsn.insn);
  551. if (mm_isBranchInstr(regs, mminsn, &contpc))
  552. insn = (union mips_instruction)(mminsn.next_insn);
  553. /* Parse instruction to find what to do */
  554. switch (insn.mm_i_format.opcode) {
  555. case mm_pool32a_op:
  556. switch (insn.mm_x_format.func) {
  557. case mm_lwxs_op:
  558. reg = insn.mm_x_format.rd;
  559. goto loadW;
  560. }
  561. goto sigbus;
  562. case mm_pool32b_op:
  563. switch (insn.mm_m_format.func) {
  564. case mm_lwp_func:
  565. reg = insn.mm_m_format.rd;
  566. if (reg == 31)
  567. goto sigbus;
  568. if (user && !access_ok(addr, 8))
  569. goto sigbus;
  570. LoadW(addr, value, res);
  571. if (res)
  572. goto fault;
  573. regs->regs[reg] = value;
  574. addr += 4;
  575. LoadW(addr, value, res);
  576. if (res)
  577. goto fault;
  578. regs->regs[reg + 1] = value;
  579. goto success;
  580. case mm_swp_func:
  581. reg = insn.mm_m_format.rd;
  582. if (reg == 31)
  583. goto sigbus;
  584. if (user && !access_ok(addr, 8))
  585. goto sigbus;
  586. value = regs->regs[reg];
  587. StoreW(addr, value, res);
  588. if (res)
  589. goto fault;
  590. addr += 4;
  591. value = regs->regs[reg + 1];
  592. StoreW(addr, value, res);
  593. if (res)
  594. goto fault;
  595. goto success;
  596. case mm_ldp_func:
  597. #ifdef CONFIG_64BIT
  598. reg = insn.mm_m_format.rd;
  599. if (reg == 31)
  600. goto sigbus;
  601. if (user && !access_ok(addr, 16))
  602. goto sigbus;
  603. LoadDW(addr, value, res);
  604. if (res)
  605. goto fault;
  606. regs->regs[reg] = value;
  607. addr += 8;
  608. LoadDW(addr, value, res);
  609. if (res)
  610. goto fault;
  611. regs->regs[reg + 1] = value;
  612. goto success;
  613. #endif /* CONFIG_64BIT */
  614. goto sigill;
  615. case mm_sdp_func:
  616. #ifdef CONFIG_64BIT
  617. reg = insn.mm_m_format.rd;
  618. if (reg == 31)
  619. goto sigbus;
  620. if (user && !access_ok(addr, 16))
  621. goto sigbus;
  622. value = regs->regs[reg];
  623. StoreDW(addr, value, res);
  624. if (res)
  625. goto fault;
  626. addr += 8;
  627. value = regs->regs[reg + 1];
  628. StoreDW(addr, value, res);
  629. if (res)
  630. goto fault;
  631. goto success;
  632. #endif /* CONFIG_64BIT */
  633. goto sigill;
  634. case mm_lwm32_func:
  635. reg = insn.mm_m_format.rd;
  636. rvar = reg & 0xf;
  637. if ((rvar > 9) || !reg)
  638. goto sigill;
  639. if (reg & 0x10) {
  640. if (user && !access_ok(addr, 4 * (rvar + 1)))
  641. goto sigbus;
  642. } else {
  643. if (user && !access_ok(addr, 4 * rvar))
  644. goto sigbus;
  645. }
  646. if (rvar == 9)
  647. rvar = 8;
  648. for (i = 16; rvar; rvar--, i++) {
  649. LoadW(addr, value, res);
  650. if (res)
  651. goto fault;
  652. addr += 4;
  653. regs->regs[i] = value;
  654. }
  655. if ((reg & 0xf) == 9) {
  656. LoadW(addr, value, res);
  657. if (res)
  658. goto fault;
  659. addr += 4;
  660. regs->regs[30] = value;
  661. }
  662. if (reg & 0x10) {
  663. LoadW(addr, value, res);
  664. if (res)
  665. goto fault;
  666. regs->regs[31] = value;
  667. }
  668. goto success;
  669. case mm_swm32_func:
  670. reg = insn.mm_m_format.rd;
  671. rvar = reg & 0xf;
  672. if ((rvar > 9) || !reg)
  673. goto sigill;
  674. if (reg & 0x10) {
  675. if (user && !access_ok(addr, 4 * (rvar + 1)))
  676. goto sigbus;
  677. } else {
  678. if (user && !access_ok(addr, 4 * rvar))
  679. goto sigbus;
  680. }
  681. if (rvar == 9)
  682. rvar = 8;
  683. for (i = 16; rvar; rvar--, i++) {
  684. value = regs->regs[i];
  685. StoreW(addr, value, res);
  686. if (res)
  687. goto fault;
  688. addr += 4;
  689. }
  690. if ((reg & 0xf) == 9) {
  691. value = regs->regs[30];
  692. StoreW(addr, value, res);
  693. if (res)
  694. goto fault;
  695. addr += 4;
  696. }
  697. if (reg & 0x10) {
  698. value = regs->regs[31];
  699. StoreW(addr, value, res);
  700. if (res)
  701. goto fault;
  702. }
  703. goto success;
  704. case mm_ldm_func:
  705. #ifdef CONFIG_64BIT
  706. reg = insn.mm_m_format.rd;
  707. rvar = reg & 0xf;
  708. if ((rvar > 9) || !reg)
  709. goto sigill;
  710. if (reg & 0x10) {
  711. if (user && !access_ok(addr, 8 * (rvar + 1)))
  712. goto sigbus;
  713. } else {
  714. if (user && !access_ok(addr, 8 * rvar))
  715. goto sigbus;
  716. }
  717. if (rvar == 9)
  718. rvar = 8;
  719. for (i = 16; rvar; rvar--, i++) {
  720. LoadDW(addr, value, res);
  721. if (res)
  722. goto fault;
  723. addr += 4;
  724. regs->regs[i] = value;
  725. }
  726. if ((reg & 0xf) == 9) {
  727. LoadDW(addr, value, res);
  728. if (res)
  729. goto fault;
  730. addr += 8;
  731. regs->regs[30] = value;
  732. }
  733. if (reg & 0x10) {
  734. LoadDW(addr, value, res);
  735. if (res)
  736. goto fault;
  737. regs->regs[31] = value;
  738. }
  739. goto success;
  740. #endif /* CONFIG_64BIT */
  741. goto sigill;
  742. case mm_sdm_func:
  743. #ifdef CONFIG_64BIT
  744. reg = insn.mm_m_format.rd;
  745. rvar = reg & 0xf;
  746. if ((rvar > 9) || !reg)
  747. goto sigill;
  748. if (reg & 0x10) {
  749. if (user && !access_ok(addr, 8 * (rvar + 1)))
  750. goto sigbus;
  751. } else {
  752. if (user && !access_ok(addr, 8 * rvar))
  753. goto sigbus;
  754. }
  755. if (rvar == 9)
  756. rvar = 8;
  757. for (i = 16; rvar; rvar--, i++) {
  758. value = regs->regs[i];
  759. StoreDW(addr, value, res);
  760. if (res)
  761. goto fault;
  762. addr += 8;
  763. }
  764. if ((reg & 0xf) == 9) {
  765. value = regs->regs[30];
  766. StoreDW(addr, value, res);
  767. if (res)
  768. goto fault;
  769. addr += 8;
  770. }
  771. if (reg & 0x10) {
  772. value = regs->regs[31];
  773. StoreDW(addr, value, res);
  774. if (res)
  775. goto fault;
  776. }
  777. goto success;
  778. #endif /* CONFIG_64BIT */
  779. goto sigill;
  780. /* LWC2, SWC2, LDC2, SDC2 are not serviced */
  781. }
  782. goto sigbus;
  783. case mm_pool32c_op:
  784. switch (insn.mm_m_format.func) {
  785. case mm_lwu_func:
  786. reg = insn.mm_m_format.rd;
  787. goto loadWU;
  788. }
  789. /* LL,SC,LLD,SCD are not serviced */
  790. goto sigbus;
  791. #ifdef CONFIG_MIPS_FP_SUPPORT
  792. case mm_pool32f_op:
  793. switch (insn.mm_x_format.func) {
  794. case mm_lwxc1_func:
  795. case mm_swxc1_func:
  796. case mm_ldxc1_func:
  797. case mm_sdxc1_func:
  798. goto fpu_emul;
  799. }
  800. goto sigbus;
  801. case mm_ldc132_op:
  802. case mm_sdc132_op:
  803. case mm_lwc132_op:
  804. case mm_swc132_op: {
  805. void __user *fault_addr = NULL;
  806. fpu_emul:
  807. /* roll back jump/branch */
  808. regs->cp0_epc = origpc;
  809. regs->regs[31] = orig31;
  810. die_if_kernel("Unaligned FP access in kernel code", regs);
  811. BUG_ON(!used_math());
  812. BUG_ON(!is_fpu_owner());
  813. res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
  814. &fault_addr);
  815. own_fpu(1); /* restore FPU state */
  816. /* If something went wrong, signal */
  817. process_fpemu_return(res, fault_addr, 0);
  818. if (res == 0)
  819. goto success;
  820. return;
  821. }
  822. #endif /* CONFIG_MIPS_FP_SUPPORT */
  823. case mm_lh32_op:
  824. reg = insn.mm_i_format.rt;
  825. goto loadHW;
  826. case mm_lhu32_op:
  827. reg = insn.mm_i_format.rt;
  828. goto loadHWU;
  829. case mm_lw32_op:
  830. reg = insn.mm_i_format.rt;
  831. goto loadW;
  832. case mm_sh32_op:
  833. reg = insn.mm_i_format.rt;
  834. goto storeHW;
  835. case mm_sw32_op:
  836. reg = insn.mm_i_format.rt;
  837. goto storeW;
  838. case mm_ld32_op:
  839. reg = insn.mm_i_format.rt;
  840. goto loadDW;
  841. case mm_sd32_op:
  842. reg = insn.mm_i_format.rt;
  843. goto storeDW;
  844. case mm_pool16c_op:
  845. switch (insn.mm16_m_format.func) {
  846. case mm_lwm16_op:
  847. reg = insn.mm16_m_format.rlist;
  848. rvar = reg + 1;
  849. if (user && !access_ok(addr, 4 * rvar))
  850. goto sigbus;
  851. for (i = 16; rvar; rvar--, i++) {
  852. LoadW(addr, value, res);
  853. if (res)
  854. goto fault;
  855. addr += 4;
  856. regs->regs[i] = value;
  857. }
  858. LoadW(addr, value, res);
  859. if (res)
  860. goto fault;
  861. regs->regs[31] = value;
  862. goto success;
  863. case mm_swm16_op:
  864. reg = insn.mm16_m_format.rlist;
  865. rvar = reg + 1;
  866. if (user && !access_ok(addr, 4 * rvar))
  867. goto sigbus;
  868. for (i = 16; rvar; rvar--, i++) {
  869. value = regs->regs[i];
  870. StoreW(addr, value, res);
  871. if (res)
  872. goto fault;
  873. addr += 4;
  874. }
  875. value = regs->regs[31];
  876. StoreW(addr, value, res);
  877. if (res)
  878. goto fault;
  879. goto success;
  880. }
  881. goto sigbus;
  882. case mm_lhu16_op:
  883. reg = reg16to32[insn.mm16_rb_format.rt];
  884. goto loadHWU;
  885. case mm_lw16_op:
  886. reg = reg16to32[insn.mm16_rb_format.rt];
  887. goto loadW;
  888. case mm_sh16_op:
  889. reg = reg16to32st[insn.mm16_rb_format.rt];
  890. goto storeHW;
  891. case mm_sw16_op:
  892. reg = reg16to32st[insn.mm16_rb_format.rt];
  893. goto storeW;
  894. case mm_lwsp16_op:
  895. reg = insn.mm16_r5_format.rt;
  896. goto loadW;
  897. case mm_swsp16_op:
  898. reg = insn.mm16_r5_format.rt;
  899. goto storeW;
  900. case mm_lwgp16_op:
  901. reg = reg16to32[insn.mm16_r3_format.rt];
  902. goto loadW;
  903. default:
  904. goto sigill;
  905. }
  906. loadHW:
  907. if (user && !access_ok(addr, 2))
  908. goto sigbus;
  909. LoadHW(addr, value, res);
  910. if (res)
  911. goto fault;
  912. regs->regs[reg] = value;
  913. goto success;
  914. loadHWU:
  915. if (user && !access_ok(addr, 2))
  916. goto sigbus;
  917. LoadHWU(addr, value, res);
  918. if (res)
  919. goto fault;
  920. regs->regs[reg] = value;
  921. goto success;
  922. loadW:
  923. if (user && !access_ok(addr, 4))
  924. goto sigbus;
  925. LoadW(addr, value, res);
  926. if (res)
  927. goto fault;
  928. regs->regs[reg] = value;
  929. goto success;
  930. loadWU:
  931. #ifdef CONFIG_64BIT
  932. /*
  933. * A 32-bit kernel might be running on a 64-bit processor. But
  934. * if we're on a 32-bit processor and an i-cache incoherency
  935. * or race makes us see a 64-bit instruction here the sdl/sdr
  936. * would blow up, so for now we don't handle unaligned 64-bit
  937. * instructions on 32-bit kernels.
  938. */
  939. if (user && !access_ok(addr, 4))
  940. goto sigbus;
  941. LoadWU(addr, value, res);
  942. if (res)
  943. goto fault;
  944. regs->regs[reg] = value;
  945. goto success;
  946. #endif /* CONFIG_64BIT */
  947. /* Cannot handle 64-bit instructions in 32-bit kernel */
  948. goto sigill;
  949. loadDW:
  950. #ifdef CONFIG_64BIT
  951. /*
  952. * A 32-bit kernel might be running on a 64-bit processor. But
  953. * if we're on a 32-bit processor and an i-cache incoherency
  954. * or race makes us see a 64-bit instruction here the sdl/sdr
  955. * would blow up, so for now we don't handle unaligned 64-bit
  956. * instructions on 32-bit kernels.
  957. */
  958. if (user && !access_ok(addr, 8))
  959. goto sigbus;
  960. LoadDW(addr, value, res);
  961. if (res)
  962. goto fault;
  963. regs->regs[reg] = value;
  964. goto success;
  965. #endif /* CONFIG_64BIT */
  966. /* Cannot handle 64-bit instructions in 32-bit kernel */
  967. goto sigill;
  968. storeHW:
  969. if (user && !access_ok(addr, 2))
  970. goto sigbus;
  971. value = regs->regs[reg];
  972. StoreHW(addr, value, res);
  973. if (res)
  974. goto fault;
  975. goto success;
  976. storeW:
  977. if (user && !access_ok(addr, 4))
  978. goto sigbus;
  979. value = regs->regs[reg];
  980. StoreW(addr, value, res);
  981. if (res)
  982. goto fault;
  983. goto success;
  984. storeDW:
  985. #ifdef CONFIG_64BIT
  986. /*
  987. * A 32-bit kernel might be running on a 64-bit processor. But
  988. * if we're on a 32-bit processor and an i-cache incoherency
  989. * or race makes us see a 64-bit instruction here the sdl/sdr
  990. * would blow up, so for now we don't handle unaligned 64-bit
  991. * instructions on 32-bit kernels.
  992. */
  993. if (user && !access_ok(addr, 8))
  994. goto sigbus;
  995. value = regs->regs[reg];
  996. StoreDW(addr, value, res);
  997. if (res)
  998. goto fault;
  999. goto success;
  1000. #endif /* CONFIG_64BIT */
  1001. /* Cannot handle 64-bit instructions in 32-bit kernel */
  1002. goto sigill;
  1003. success:
  1004. regs->cp0_epc = contpc; /* advance or branch */
  1005. #ifdef CONFIG_DEBUG_FS
  1006. unaligned_instructions++;
  1007. #endif
  1008. return;
  1009. fault:
  1010. /* roll back jump/branch */
  1011. regs->cp0_epc = origpc;
  1012. regs->regs[31] = orig31;
  1013. /* Did we have an exception handler installed? */
  1014. if (fixup_exception(regs))
  1015. return;
  1016. die_if_kernel("Unhandled kernel unaligned access", regs);
  1017. force_sig(SIGSEGV);
  1018. return;
  1019. sigbus:
  1020. die_if_kernel("Unhandled kernel unaligned access", regs);
  1021. force_sig(SIGBUS);
  1022. return;
  1023. sigill:
  1024. die_if_kernel
  1025. ("Unhandled kernel unaligned access or invalid instruction", regs);
  1026. force_sig(SIGILL);
  1027. }
/*
 * Emulate an unaligned MIPS16e load or store.
 *
 * Re-fetches the faulting MIPS16e instruction from EPC (skipping a leading
 * EXTEND prefix, or stepping over the jump when the fault hit a delay slot),
 * decodes the target register, then performs the access at @addr with the
 * Load*/Store* fixup macros.  On success the EPC is advanced with
 * MIPS16e_compute_return_epc(); on a memory fault the original EPC and $31
 * are restored before the signal is raised so a restart re-executes the
 * branch correctly.
 *
 * @regs: saved register state of the faulting context
 * @addr: the unaligned data address (cp0_badvaddr) to emulate against
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;
	unsigned int opcode;
	int extended = 0;
	bool user = user_mode(regs);

	/* Saved so the fault path can roll back a partially-taken branch. */
	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];

	/* Strip the ISA-mode bit from EPC to get the real instruction address. */
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);

	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	/* Keep the first halfword: epc computation needs the unextended form. */
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		extended = 1;
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		/* This fetch can fault (past the known-good halfword). */
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	/*
	 * First pass: decode which GPR the access targets.  For MIPS16e2
	 * extended encodings the immediate's top bits select a different
	 * operation, so @opcode may be rewritten here as well.
	 */
	opcode = mips16inst.ri.opcode;
	switch (opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}
		goto sigbus;

	case MIPS16e_swsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* SWSP */
			case 1:		/* SWGP */
				break;
			case 2:		/* SHGP */
				opcode = MIPS16e_sh_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_lwpc_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* LWSP */
			case 1:		/* LWGP */
				break;
			case 2:		/* LHGP */
				opcode = MIPS16e_lh_op;
				break;
			case 4:		/* LHUGP */
				opcode = MIPS16e_lhu_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/*
	 * Second pass: perform the access.  Loads advance the EPC only after
	 * the data transfer succeeded; stores advance it before reading the
	 * source register so regs->regs[reg] is read from the final state.
	 */
	switch (opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* Byte accesses can never be unaligned; seeing one here is bogus. */
		goto sigbus;

	case MIPS16e_lh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
/*
 * Address-error exception handler: entry point for unaligned accesses.
 *
 * Decides whether the fault should be emulated or turned into a signal,
 * then dispatches to the microMIPS, MIPS16e or classic-MIPS emulator based
 * on the ISA mode encoded in the EPC.  Emulation is refused (SIGBUS) when
 * the fault was an instruction fetch, when a user task has not opted in
 * via TIF_FIXADE, or when unaligned_action requests signals.
 *
 * NOTE(review): the early `return`s after successful dispatch skip
 * exception_exit(prev_state); only the sigbus path pairs it with the
 * exception_enter() above — confirm this asymmetry is intentional.
 */
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int *pc;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);

#ifdef CONFIG_64BIT
	/*
	 * check, if we are hitting space between CPU implemented maximum
	 * virtual user address and 64bit maximum virtual user address
	 * and do exception handling to get EFAULTs for get_user/put_user
	 */
	if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
		(regs->cp0_badvaddr < XKSSEG)) {
		if (fixup_exception(regs)) {
			current->thread.cp0_baduaddr = regs->cp0_badvaddr;
			return;
		}
		goto sigbus;
	}
#endif

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	/* User tasks must explicitly request fixups via TIF_FIXADE. */
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so but ugly ...
	 */

	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		/* ISA bit set: prefer microMIPS, else MIPS16e, else give up. */
		if (cpu_has_mmips) {
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			return;
		}

		if (cpu_has_mips16) {
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			return;
		}

		goto sigbus;
	}

	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int *)exception_epc(regs);

	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
	exception_exit(prev_state);
}
  1339. #ifdef CONFIG_DEBUG_FS
  1340. static int __init debugfs_unaligned(void)
  1341. {
  1342. debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
  1343. &unaligned_instructions);
  1344. debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
  1345. mips_debugfs_dir, &unaligned_action);
  1346. return 0;
  1347. }
  1348. arch_initcall(debugfs_unaligned);
  1349. #endif