  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (c) 2014 Imagination Technologies Ltd.
  7. * Author: Leonid Yegoshin <[email protected]>
  8. * Author: Markos Chandras <[email protected]>
  9. *
  10. * MIPS R2 user space instruction emulator for MIPS R6
  11. *
  12. */
  13. #include <linux/bug.h>
  14. #include <linux/compiler.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/init.h>
  17. #include <linux/kernel.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/seq_file.h>
  20. #include <asm/asm.h>
  21. #include <asm/branch.h>
  22. #include <asm/break.h>
  23. #include <asm/debug.h>
  24. #include <asm/fpu.h>
  25. #include <asm/fpu_emulator.h>
  26. #include <asm/inst.h>
  27. #include <asm/mips-r2-to-r6-emul.h>
  28. #include <asm/local.h>
  29. #include <asm/mipsregs.h>
  30. #include <asm/ptrace.h>
  31. #include <linux/uaccess.h>
/*
 * Assembler mnemonics used when emitting inline emulation code: on 64-bit
 * kernels the doubleword forms (daddiu/dins/dext) must be used so that the
 * full GPR width is preserved.
 */
#ifdef CONFIG_64BIT
#define ADDIU "daddiu "
#define INS "dins "
#define EXT "dext "
#else
#define ADDIU "addiu "
#define INS "ins "
#define EXT "ext "
#endif /* CONFIG_64BIT */
/* Byte and load-linked/store-conditional mnemonics (same on both widths). */
#define SB "sb "
#define LB "lb "
#define LL "ll "
#define SC "sc "
#ifdef CONFIG_DEBUG_FS
/* Per-CPU emulation statistics (general, delay-slot and branch counters). */
static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
#endif

/* FPU condition-code bit masks, indexed by cc field; defined elsewhere. */
extern const unsigned int fpucondbit[8];

/* Upper bound on repeated emulation passes before giving up. */
#define MIPS_R2_EMUL_TOTAL_PASS	10
  52. int mipsr2_emulation = 0;
  53. static int __init mipsr2emu_enable(char *s)
  54. {
  55. mipsr2_emulation = 1;
  56. pr_info("MIPS R2-to-R6 Emulator Enabled!");
  57. return 1;
  58. }
  59. __setup("mipsr2emu", mipsr2emu_enable);
/**
 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot
 * for performance instead of the traditional way of using a stack trampoline
 * which is rather slow.
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 when the instruction was emulated here, -SIGFPE when it is an
 * FPU instruction that the caller must hand to the FPU emulator, or SIGILL
 * when no fast path exists (caller falls back to trampoline emulation).
 */
static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
{
	switch (MIPSInst_OPCODE(ir)) {
	case addiu_op:
		/* Writes to $0 are discarded; result is sign-extended 32-bit. */
		if (MIPSInst_RT(ir))
			regs->regs[MIPSInst_RT(ir)] =
				(s32)regs->regs[MIPSInst_RS(ir)] +
				(s32)MIPSInst_SIMM(ir);
		return 0;
	case daddiu_op:
		/* Doubleword form is invalid on 32-bit kernels. */
		if (IS_ENABLED(CONFIG_32BIT))
			break;

		if (MIPSInst_RT(ir))
			regs->regs[MIPSInst_RT(ir)] =
				(s64)regs->regs[MIPSInst_RS(ir)] +
				(s64)MIPSInst_SIMM(ir);
		return 0;
	case lwc1_op:
	case swc1_op:
	case cop1_op:
	case cop1x_op:
		/* FPU instructions in delay slot */
		return -SIGFPE;
	case spec_op:
		switch (MIPSInst_FUNC(ir)) {
		case or_op:
			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					regs->regs[MIPSInst_RS(ir)] |
					regs->regs[MIPSInst_RT(ir)];
			return 0;
		case sll_op:
			/* rs field must be 0 for a plain SLL encoding. */
			if (MIPSInst_RS(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
						MIPSInst_FD(ir));
			return 0;
		case srl_op:
			if (MIPSInst_RS(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
						MIPSInst_FD(ir));
			return 0;
		case addu_op:
			/* sa field must be 0 for a plain ADDU encoding. */
			if (MIPSInst_FD(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s32)((u32)regs->regs[MIPSInst_RS(ir)] +
					      (u32)regs->regs[MIPSInst_RT(ir)]);
			return 0;
		case subu_op:
			if (MIPSInst_FD(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s32)((u32)regs->regs[MIPSInst_RS(ir)] -
					      (u32)regs->regs[MIPSInst_RT(ir)]);
			return 0;
		case dsll_op:
			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
						MIPSInst_FD(ir));
			return 0;
		case dsrl_op:
			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
						MIPSInst_FD(ir));
			return 0;
		case daddu_op:
			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(u64)regs->regs[MIPSInst_RS(ir)] +
					(u64)regs->regs[MIPSInst_RT(ir)];
			return 0;
		case dsubu_op:
			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
				break;

			if (MIPSInst_RD(ir))
				regs->regs[MIPSInst_RD(ir)] =
					(s64)((u64)regs->regs[MIPSInst_RS(ir)] -
					      (u64)regs->regs[MIPSInst_RT(ir)]);
			return 0;
		}
		break;
	default:
		pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
			 ir, MIPSInst_OPCODE(ir));
	}

	return SIGILL;
}
  170. /**
  171. * movf_func - Emulate a MOVF instruction
  172. * @regs: Process register set
  173. * @ir: Instruction
  174. *
  175. * Returns 0 since it always succeeds.
  176. */
  177. static int movf_func(struct pt_regs *regs, u32 ir)
  178. {
  179. u32 csr;
  180. u32 cond;
  181. csr = current->thread.fpu.fcr31;
  182. cond = fpucondbit[MIPSInst_RT(ir) >> 2];
  183. if (((csr & cond) == 0) && MIPSInst_RD(ir))
  184. regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
  185. MIPS_R2_STATS(movs);
  186. return 0;
  187. }
  188. /**
  189. * movt_func - Emulate a MOVT instruction
  190. * @regs: Process register set
  191. * @ir: Instruction
  192. *
  193. * Returns 0 since it always succeeds.
  194. */
  195. static int movt_func(struct pt_regs *regs, u32 ir)
  196. {
  197. u32 csr;
  198. u32 cond;
  199. csr = current->thread.fpu.fcr31;
  200. cond = fpucondbit[MIPSInst_RT(ir) >> 2];
  201. if (((csr & cond) != 0) && MIPSInst_RD(ir))
  202. regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
  203. MIPS_R2_STATS(movs);
  204. return 0;
  205. }
/**
 * jr_func - Emulate a JR instruction.
 * @pt_regs: Process register set
 * @ir: Instruction
 *
 * Returns SIGILL if JR was in delay slot, SIGEMT if we
 * can't compute the EPC, SIGSEGV if we can't access the
 * userland instruction or 0 on success.
 */
static int jr_func(struct pt_regs *regs, u32 ir)
{
	int err;
	unsigned long cepc, epc, nepc;
	u32 nir;

	/* A jump cannot itself live in a delay slot. */
	if (delay_slot(regs))
		return SIGILL;

	/* EPC after the RI/JR instruction */
	nepc = regs->cp0_epc;

	/* Roll back to the reserved R2 JR instruction */
	regs->cp0_epc -= 4;
	epc = regs->cp0_epc;
	err = __compute_return_epc(regs);

	if (err < 0)
		return SIGEMT;

	/* Computed EPC */
	cepc = regs->cp0_epc;

	/* Get DS instruction */
	err = __get_user(nir, (u32 __user *)nepc);
	if (err)
		return SIGSEGV;

	MIPS_R2BR_STATS(jrs);

	/* If nir == 0(NOP), then nothing else to do */
	if (nir) {
		/*
		 * Negative err means FPU instruction in BD-slot,
		 * Zero err means 'BD-slot emulation done'
		 * For anything else we go back to trampoline emulation.
		 */
		err = mipsr6_emul(regs, nir);
		if (err > 0) {
			/* Trampoline path: restart at the DS instruction. */
			regs->cp0_epc = nepc;
			err = mips_dsemul(regs, nir, epc, cepc);
			if (err == SIGILL)
				err = SIGEMT;
			MIPS_R2_STATS(dsemul);
		}
	}

	return err;
}
  255. /**
  256. * movz_func - Emulate a MOVZ instruction
  257. * @regs: Process register set
  258. * @ir: Instruction
  259. *
  260. * Returns 0 since it always succeeds.
  261. */
  262. static int movz_func(struct pt_regs *regs, u32 ir)
  263. {
  264. if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
  265. regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
  266. MIPS_R2_STATS(movs);
  267. return 0;
  268. }
  269. /**
  270. * movn_func - Emulate a MOVZ instruction
  271. * @regs: Process register set
  272. * @ir: Instruction
  273. *
  274. * Returns 0 since it always succeeds.
  275. */
  276. static int movn_func(struct pt_regs *regs, u32 ir)
  277. {
  278. if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
  279. regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
  280. MIPS_R2_STATS(movs);
  281. return 0;
  282. }
  283. /**
  284. * mfhi_func - Emulate a MFHI instruction
  285. * @regs: Process register set
  286. * @ir: Instruction
  287. *
  288. * Returns 0 since it always succeeds.
  289. */
  290. static int mfhi_func(struct pt_regs *regs, u32 ir)
  291. {
  292. if (MIPSInst_RD(ir))
  293. regs->regs[MIPSInst_RD(ir)] = regs->hi;
  294. MIPS_R2_STATS(hilo);
  295. return 0;
  296. }
  297. /**
  298. * mthi_func - Emulate a MTHI instruction
  299. * @regs: Process register set
  300. * @ir: Instruction
  301. *
  302. * Returns 0 since it always succeeds.
  303. */
  304. static int mthi_func(struct pt_regs *regs, u32 ir)
  305. {
  306. regs->hi = regs->regs[MIPSInst_RS(ir)];
  307. MIPS_R2_STATS(hilo);
  308. return 0;
  309. }
  310. /**
  311. * mflo_func - Emulate a MFLO instruction
  312. * @regs: Process register set
  313. * @ir: Instruction
  314. *
  315. * Returns 0 since it always succeeds.
  316. */
  317. static int mflo_func(struct pt_regs *regs, u32 ir)
  318. {
  319. if (MIPSInst_RD(ir))
  320. regs->regs[MIPSInst_RD(ir)] = regs->lo;
  321. MIPS_R2_STATS(hilo);
  322. return 0;
  323. }
  324. /**
  325. * mtlo_func - Emulate a MTLO instruction
  326. * @regs: Process register set
  327. * @ir: Instruction
  328. *
  329. * Returns 0 since it always succeeds.
  330. */
  331. static int mtlo_func(struct pt_regs *regs, u32 ir)
  332. {
  333. regs->lo = regs->regs[MIPSInst_RS(ir)];
  334. MIPS_R2_STATS(hilo);
  335. return 0;
  336. }
  337. /**
  338. * mult_func - Emulate a MULT instruction
  339. * @regs: Process register set
  340. * @ir: Instruction
  341. *
  342. * Returns 0 since it always succeeds.
  343. */
  344. static int mult_func(struct pt_regs *regs, u32 ir)
  345. {
  346. s64 res;
  347. s32 rt, rs;
  348. rt = regs->regs[MIPSInst_RT(ir)];
  349. rs = regs->regs[MIPSInst_RS(ir)];
  350. res = (s64)rt * (s64)rs;
  351. rs = res;
  352. regs->lo = (s64)rs;
  353. rt = res >> 32;
  354. res = (s64)rt;
  355. regs->hi = res;
  356. MIPS_R2_STATS(muls);
  357. return 0;
  358. }
  359. /**
  360. * multu_func - Emulate a MULTU instruction
  361. * @regs: Process register set
  362. * @ir: Instruction
  363. *
  364. * Returns 0 since it always succeeds.
  365. */
  366. static int multu_func(struct pt_regs *regs, u32 ir)
  367. {
  368. u64 res;
  369. u32 rt, rs;
  370. rt = regs->regs[MIPSInst_RT(ir)];
  371. rs = regs->regs[MIPSInst_RS(ir)];
  372. res = (u64)rt * (u64)rs;
  373. rt = res;
  374. regs->lo = (s64)(s32)rt;
  375. regs->hi = (s64)(s32)(res >> 32);
  376. MIPS_R2_STATS(muls);
  377. return 0;
  378. }
  379. /**
  380. * div_func - Emulate a DIV instruction
  381. * @regs: Process register set
  382. * @ir: Instruction
  383. *
  384. * Returns 0 since it always succeeds.
  385. */
  386. static int div_func(struct pt_regs *regs, u32 ir)
  387. {
  388. s32 rt, rs;
  389. rt = regs->regs[MIPSInst_RT(ir)];
  390. rs = regs->regs[MIPSInst_RS(ir)];
  391. regs->lo = (s64)(rs / rt);
  392. regs->hi = (s64)(rs % rt);
  393. MIPS_R2_STATS(divs);
  394. return 0;
  395. }
  396. /**
  397. * divu_func - Emulate a DIVU instruction
  398. * @regs: Process register set
  399. * @ir: Instruction
  400. *
  401. * Returns 0 since it always succeeds.
  402. */
  403. static int divu_func(struct pt_regs *regs, u32 ir)
  404. {
  405. u32 rt, rs;
  406. rt = regs->regs[MIPSInst_RT(ir)];
  407. rs = regs->regs[MIPSInst_RS(ir)];
  408. regs->lo = (s64)(rs / rt);
  409. regs->hi = (s64)(rs % rt);
  410. MIPS_R2_STATS(divs);
  411. return 0;
  412. }
/**
 * dmult_func - Emulate a DMULT instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int dmult_func(struct pt_regs *regs, u32 ir)
{
	s64 res;
	s64 rt, rs;

	if (IS_ENABLED(CONFIG_32BIT))
		return SIGILL;

	rt = regs->regs[MIPSInst_RT(ir)];
	rs = regs->regs[MIPSInst_RS(ir)];

	/* Low 64 bits of the 128-bit product. */
	res = rt * rs;

	regs->lo = res;

	/* High 64 bits via the DMUH instruction. */
	__asm__ __volatile__(
		"dmuh %0, %1, %2\t\n"
		: "=r"(res)
		: "r"(rt), "r"(rs));

	regs->hi = res;

	MIPS_R2_STATS(muls);

	return 0;
}
/**
 * dmultu_func - Emulate a DMULTU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int dmultu_func(struct pt_regs *regs, u32 ir)
{
	u64 res;
	u64 rt, rs;

	if (IS_ENABLED(CONFIG_32BIT))
		return SIGILL;

	rt = regs->regs[MIPSInst_RT(ir)];
	rs = regs->regs[MIPSInst_RS(ir)];

	/* Low 64 bits of the 128-bit product. */
	res = rt * rs;

	regs->lo = res;

	/* High 64 bits via the DMUHU instruction. */
	__asm__ __volatile__(
		"dmuhu %0, %1, %2\t\n"
		: "=r"(res)
		: "r"(rt), "r"(rs));

	regs->hi = res;

	MIPS_R2_STATS(muls);

	return 0;
}
  463. /**
  464. * ddiv_func - Emulate a DDIV instruction
  465. * @regs: Process register set
  466. * @ir: Instruction
  467. *
  468. * Returns 0 on success or SIGILL for 32-bit kernels.
  469. */
  470. static int ddiv_func(struct pt_regs *regs, u32 ir)
  471. {
  472. s64 rt, rs;
  473. if (IS_ENABLED(CONFIG_32BIT))
  474. return SIGILL;
  475. rt = regs->regs[MIPSInst_RT(ir)];
  476. rs = regs->regs[MIPSInst_RS(ir)];
  477. regs->lo = rs / rt;
  478. regs->hi = rs % rt;
  479. MIPS_R2_STATS(divs);
  480. return 0;
  481. }
  482. /**
  483. * ddivu_func - Emulate a DDIVU instruction
  484. * @regs: Process register set
  485. * @ir: Instruction
  486. *
  487. * Returns 0 on success or SIGILL for 32-bit kernels.
  488. */
  489. static int ddivu_func(struct pt_regs *regs, u32 ir)
  490. {
  491. u64 rt, rs;
  492. if (IS_ENABLED(CONFIG_32BIT))
  493. return SIGILL;
  494. rt = regs->regs[MIPSInst_RT(ir)];
  495. rs = regs->regs[MIPSInst_RS(ir)];
  496. regs->lo = rs / rt;
  497. regs->hi = rs % rt;
  498. MIPS_R2_STATS(divs);
  499. return 0;
  500. }
/* R6 removed instructions for the SPECIAL opcode */
/* Each entry is { encoding mask, matching code, emulation handler }. */
static const struct r2_decoder_table spec_op_table[] = {
	{ 0xfc1ff83f, 0x00000008, jr_func },	/* JR */
	{ 0xfc00ffff, 0x00000018, mult_func },	/* MULT */
	{ 0xfc00ffff, 0x00000019, multu_func },	/* MULTU */
	{ 0xfc00ffff, 0x0000001c, dmult_func },	/* DMULT */
	{ 0xfc00ffff, 0x0000001d, dmultu_func },	/* DMULTU */
	{ 0xffff07ff, 0x00000010, mfhi_func },	/* MFHI */
	{ 0xfc1fffff, 0x00000011, mthi_func },	/* MTHI */
	{ 0xffff07ff, 0x00000012, mflo_func },	/* MFLO */
	{ 0xfc1fffff, 0x00000013, mtlo_func },	/* MTLO */
	{ 0xfc0307ff, 0x00000001, movf_func },	/* MOVF */
	{ 0xfc0307ff, 0x00010001, movt_func },	/* MOVT */
	{ 0xfc0007ff, 0x0000000a, movz_func },	/* MOVZ */
	{ 0xfc0007ff, 0x0000000b, movn_func },	/* MOVN */
	{ 0xfc00ffff, 0x0000001a, div_func },	/* DIV */
	{ 0xfc00ffff, 0x0000001b, divu_func },	/* DIVU */
	{ 0xfc00ffff, 0x0000001e, ddiv_func },	/* DDIV */
	{ 0xfc00ffff, 0x0000001f, ddivu_func },	/* DDIVU */
	{ }	/* sentinel: NULL func terminates the scan */
};
  522. /**
  523. * madd_func - Emulate a MADD instruction
  524. * @regs: Process register set
  525. * @ir: Instruction
  526. *
  527. * Returns 0 since it always succeeds.
  528. */
  529. static int madd_func(struct pt_regs *regs, u32 ir)
  530. {
  531. s64 res;
  532. s32 rt, rs;
  533. rt = regs->regs[MIPSInst_RT(ir)];
  534. rs = regs->regs[MIPSInst_RS(ir)];
  535. res = (s64)rt * (s64)rs;
  536. rt = regs->hi;
  537. rs = regs->lo;
  538. res += ((((s64)rt) << 32) | (u32)rs);
  539. rt = res;
  540. regs->lo = (s64)rt;
  541. rs = res >> 32;
  542. regs->hi = (s64)rs;
  543. MIPS_R2_STATS(dsps);
  544. return 0;
  545. }
  546. /**
  547. * maddu_func - Emulate a MADDU instruction
  548. * @regs: Process register set
  549. * @ir: Instruction
  550. *
  551. * Returns 0 since it always succeeds.
  552. */
  553. static int maddu_func(struct pt_regs *regs, u32 ir)
  554. {
  555. u64 res;
  556. u32 rt, rs;
  557. rt = regs->regs[MIPSInst_RT(ir)];
  558. rs = regs->regs[MIPSInst_RS(ir)];
  559. res = (u64)rt * (u64)rs;
  560. rt = regs->hi;
  561. rs = regs->lo;
  562. res += ((((s64)rt) << 32) | (u32)rs);
  563. rt = res;
  564. regs->lo = (s64)(s32)rt;
  565. rs = res >> 32;
  566. regs->hi = (s64)(s32)rs;
  567. MIPS_R2_STATS(dsps);
  568. return 0;
  569. }
  570. /**
  571. * msub_func - Emulate a MSUB instruction
  572. * @regs: Process register set
  573. * @ir: Instruction
  574. *
  575. * Returns 0 since it always succeeds.
  576. */
  577. static int msub_func(struct pt_regs *regs, u32 ir)
  578. {
  579. s64 res;
  580. s32 rt, rs;
  581. rt = regs->regs[MIPSInst_RT(ir)];
  582. rs = regs->regs[MIPSInst_RS(ir)];
  583. res = (s64)rt * (s64)rs;
  584. rt = regs->hi;
  585. rs = regs->lo;
  586. res = ((((s64)rt) << 32) | (u32)rs) - res;
  587. rt = res;
  588. regs->lo = (s64)rt;
  589. rs = res >> 32;
  590. regs->hi = (s64)rs;
  591. MIPS_R2_STATS(dsps);
  592. return 0;
  593. }
  594. /**
  595. * msubu_func - Emulate a MSUBU instruction
  596. * @regs: Process register set
  597. * @ir: Instruction
  598. *
  599. * Returns 0 since it always succeeds.
  600. */
  601. static int msubu_func(struct pt_regs *regs, u32 ir)
  602. {
  603. u64 res;
  604. u32 rt, rs;
  605. rt = regs->regs[MIPSInst_RT(ir)];
  606. rs = regs->regs[MIPSInst_RS(ir)];
  607. res = (u64)rt * (u64)rs;
  608. rt = regs->hi;
  609. rs = regs->lo;
  610. res = ((((s64)rt) << 32) | (u32)rs) - res;
  611. rt = res;
  612. regs->lo = (s64)(s32)rt;
  613. rs = res >> 32;
  614. regs->hi = (s64)(s32)rs;
  615. MIPS_R2_STATS(dsps);
  616. return 0;
  617. }
  618. /**
  619. * mul_func - Emulate a MUL instruction
  620. * @regs: Process register set
  621. * @ir: Instruction
  622. *
  623. * Returns 0 since it always succeeds.
  624. */
  625. static int mul_func(struct pt_regs *regs, u32 ir)
  626. {
  627. s64 res;
  628. s32 rt, rs;
  629. if (!MIPSInst_RD(ir))
  630. return 0;
  631. rt = regs->regs[MIPSInst_RT(ir)];
  632. rs = regs->regs[MIPSInst_RS(ir)];
  633. res = (s64)rt * (s64)rs;
  634. rs = res;
  635. regs->regs[MIPSInst_RD(ir)] = (s64)rs;
  636. MIPS_R2_STATS(muls);
  637. return 0;
  638. }
  639. /**
  640. * clz_func - Emulate a CLZ instruction
  641. * @regs: Process register set
  642. * @ir: Instruction
  643. *
  644. * Returns 0 since it always succeeds.
  645. */
  646. static int clz_func(struct pt_regs *regs, u32 ir)
  647. {
  648. u32 res;
  649. u32 rs;
  650. if (!MIPSInst_RD(ir))
  651. return 0;
  652. rs = regs->regs[MIPSInst_RS(ir)];
  653. __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
  654. regs->regs[MIPSInst_RD(ir)] = res;
  655. MIPS_R2_STATS(bops);
  656. return 0;
  657. }
  658. /**
  659. * clo_func - Emulate a CLO instruction
  660. * @regs: Process register set
  661. * @ir: Instruction
  662. *
  663. * Returns 0 since it always succeeds.
  664. */
  665. static int clo_func(struct pt_regs *regs, u32 ir)
  666. {
  667. u32 res;
  668. u32 rs;
  669. if (!MIPSInst_RD(ir))
  670. return 0;
  671. rs = regs->regs[MIPSInst_RS(ir)];
  672. __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
  673. regs->regs[MIPSInst_RD(ir)] = res;
  674. MIPS_R2_STATS(bops);
  675. return 0;
  676. }
  677. /**
  678. * dclz_func - Emulate a DCLZ instruction
  679. * @regs: Process register set
  680. * @ir: Instruction
  681. *
  682. * Returns 0 since it always succeeds.
  683. */
  684. static int dclz_func(struct pt_regs *regs, u32 ir)
  685. {
  686. u64 res;
  687. u64 rs;
  688. if (IS_ENABLED(CONFIG_32BIT))
  689. return SIGILL;
  690. if (!MIPSInst_RD(ir))
  691. return 0;
  692. rs = regs->regs[MIPSInst_RS(ir)];
  693. __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
  694. regs->regs[MIPSInst_RD(ir)] = res;
  695. MIPS_R2_STATS(bops);
  696. return 0;
  697. }
  698. /**
  699. * dclo_func - Emulate a DCLO instruction
  700. * @regs: Process register set
  701. * @ir: Instruction
  702. *
  703. * Returns 0 since it always succeeds.
  704. */
  705. static int dclo_func(struct pt_regs *regs, u32 ir)
  706. {
  707. u64 res;
  708. u64 rs;
  709. if (IS_ENABLED(CONFIG_32BIT))
  710. return SIGILL;
  711. if (!MIPSInst_RD(ir))
  712. return 0;
  713. rs = regs->regs[MIPSInst_RS(ir)];
  714. __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
  715. regs->regs[MIPSInst_RD(ir)] = res;
  716. MIPS_R2_STATS(bops);
  717. return 0;
  718. }
/* R6 removed instructions for the SPECIAL2 opcode */
/* Each entry is { encoding mask, matching code, emulation handler }. */
static const struct r2_decoder_table spec2_op_table[] = {
	{ 0xfc00ffff, 0x70000000, madd_func },	/* MADD */
	{ 0xfc00ffff, 0x70000001, maddu_func },	/* MADDU */
	{ 0xfc0007ff, 0x70000002, mul_func },	/* MUL */
	{ 0xfc00ffff, 0x70000004, msub_func },	/* MSUB */
	{ 0xfc00ffff, 0x70000005, msubu_func },	/* MSUBU */
	{ 0xfc0007ff, 0x70000020, clz_func },	/* CLZ */
	{ 0xfc0007ff, 0x70000021, clo_func },	/* CLO */
	{ 0xfc0007ff, 0x70000024, dclz_func },	/* DCLZ */
	{ 0xfc0007ff, 0x70000025, dclo_func },	/* DCLO */
	{ }	/* sentinel: NULL func terminates the scan */
};
  732. static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
  733. const struct r2_decoder_table *table)
  734. {
  735. const struct r2_decoder_table *p;
  736. int err;
  737. for (p = table; p->func; p++) {
  738. if ((inst & p->mask) == p->code) {
  739. err = (p->func)(regs, inst);
  740. return err;
  741. }
  742. }
  743. return SIGILL;
  744. }
  745. /**
  746. * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
  747. * @regs: Process register set
  748. * @inst: Instruction to decode and emulate
  749. * @fcr31: Floating Point Control and Status Register Cause bits returned
  750. */
  751. int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
  752. {
  753. int err = 0;
  754. unsigned long vaddr;
  755. u32 nir;
  756. unsigned long cpc, epc, nepc, r31, res, rs, rt;
  757. void __user *fault_addr = NULL;
  758. int pass = 0;
  759. repeat:
  760. r31 = regs->regs[31];
  761. epc = regs->cp0_epc;
  762. err = compute_return_epc(regs);
  763. if (err < 0) {
  764. BUG();
  765. return SIGEMT;
  766. }
  767. pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n",
  768. inst, epc, pass);
  769. switch (MIPSInst_OPCODE(inst)) {
  770. case spec_op:
  771. err = mipsr2_find_op_func(regs, inst, spec_op_table);
  772. if (err < 0) {
  773. /* FPU instruction under JR */
  774. regs->cp0_cause |= CAUSEF_BD;
  775. goto fpu_emul;
  776. }
  777. break;
  778. case spec2_op:
  779. err = mipsr2_find_op_func(regs, inst, spec2_op_table);
  780. break;
  781. case bcond_op:
  782. rt = MIPSInst_RT(inst);
  783. rs = MIPSInst_RS(inst);
  784. switch (rt) {
  785. case tgei_op:
  786. if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
  787. do_trap_or_bp(regs, 0, 0, "TGEI");
  788. MIPS_R2_STATS(traps);
  789. break;
  790. case tgeiu_op:
  791. if (regs->regs[rs] >= MIPSInst_UIMM(inst))
  792. do_trap_or_bp(regs, 0, 0, "TGEIU");
  793. MIPS_R2_STATS(traps);
  794. break;
  795. case tlti_op:
  796. if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
  797. do_trap_or_bp(regs, 0, 0, "TLTI");
  798. MIPS_R2_STATS(traps);
  799. break;
  800. case tltiu_op:
  801. if (regs->regs[rs] < MIPSInst_UIMM(inst))
  802. do_trap_or_bp(regs, 0, 0, "TLTIU");
  803. MIPS_R2_STATS(traps);
  804. break;
  805. case teqi_op:
  806. if (regs->regs[rs] == MIPSInst_SIMM(inst))
  807. do_trap_or_bp(regs, 0, 0, "TEQI");
  808. MIPS_R2_STATS(traps);
  809. break;
  810. case tnei_op:
  811. if (regs->regs[rs] != MIPSInst_SIMM(inst))
  812. do_trap_or_bp(regs, 0, 0, "TNEI");
  813. MIPS_R2_STATS(traps);
  814. break;
  815. case bltzl_op:
  816. case bgezl_op:
  817. case bltzall_op:
  818. case bgezall_op:
  819. if (delay_slot(regs)) {
  820. err = SIGILL;
  821. break;
  822. }
  823. regs->regs[31] = r31;
  824. regs->cp0_epc = epc;
  825. err = __compute_return_epc(regs);
  826. if (err < 0)
  827. return SIGEMT;
  828. if (err != BRANCH_LIKELY_TAKEN)
  829. break;
  830. cpc = regs->cp0_epc;
  831. nepc = epc + 4;
  832. err = __get_user(nir, (u32 __user *)nepc);
  833. if (err) {
  834. err = SIGSEGV;
  835. break;
  836. }
  837. /*
  838. * This will probably be optimized away when
  839. * CONFIG_DEBUG_FS is not enabled
  840. */
  841. switch (rt) {
  842. case bltzl_op:
  843. MIPS_R2BR_STATS(bltzl);
  844. break;
  845. case bgezl_op:
  846. MIPS_R2BR_STATS(bgezl);
  847. break;
  848. case bltzall_op:
  849. MIPS_R2BR_STATS(bltzall);
  850. break;
  851. case bgezall_op:
  852. MIPS_R2BR_STATS(bgezall);
  853. break;
  854. }
  855. switch (MIPSInst_OPCODE(nir)) {
  856. case cop1_op:
  857. case cop1x_op:
  858. case lwc1_op:
  859. case swc1_op:
  860. regs->cp0_cause |= CAUSEF_BD;
  861. goto fpu_emul;
  862. }
  863. if (nir) {
  864. err = mipsr6_emul(regs, nir);
  865. if (err > 0) {
  866. err = mips_dsemul(regs, nir, epc, cpc);
  867. if (err == SIGILL)
  868. err = SIGEMT;
  869. MIPS_R2_STATS(dsemul);
  870. }
  871. }
  872. break;
  873. case bltzal_op:
  874. case bgezal_op:
  875. if (delay_slot(regs)) {
  876. err = SIGILL;
  877. break;
  878. }
  879. regs->regs[31] = r31;
  880. regs->cp0_epc = epc;
  881. err = __compute_return_epc(regs);
  882. if (err < 0)
  883. return SIGEMT;
  884. cpc = regs->cp0_epc;
  885. nepc = epc + 4;
  886. err = __get_user(nir, (u32 __user *)nepc);
  887. if (err) {
  888. err = SIGSEGV;
  889. break;
  890. }
  891. /*
  892. * This will probably be optimized away when
  893. * CONFIG_DEBUG_FS is not enabled
  894. */
  895. switch (rt) {
  896. case bltzal_op:
  897. MIPS_R2BR_STATS(bltzal);
  898. break;
  899. case bgezal_op:
  900. MIPS_R2BR_STATS(bgezal);
  901. break;
  902. }
  903. switch (MIPSInst_OPCODE(nir)) {
  904. case cop1_op:
  905. case cop1x_op:
  906. case lwc1_op:
  907. case swc1_op:
  908. regs->cp0_cause |= CAUSEF_BD;
  909. goto fpu_emul;
  910. }
  911. if (nir) {
  912. err = mipsr6_emul(regs, nir);
  913. if (err > 0) {
  914. err = mips_dsemul(regs, nir, epc, cpc);
  915. if (err == SIGILL)
  916. err = SIGEMT;
  917. MIPS_R2_STATS(dsemul);
  918. }
  919. }
  920. break;
  921. default:
  922. regs->regs[31] = r31;
  923. regs->cp0_epc = epc;
  924. err = SIGILL;
  925. break;
  926. }
  927. break;
  928. case blezl_op:
  929. case bgtzl_op:
  930. /*
  931. * For BLEZL and BGTZL, rt field must be set to 0. If this
  932. * is not the case, this may be an encoding of a MIPS R6
  933. * instruction, so return to CPU execution if this occurs
  934. */
  935. if (MIPSInst_RT(inst)) {
  936. err = SIGILL;
  937. break;
  938. }
  939. fallthrough;
  940. case beql_op:
  941. case bnel_op:
  942. if (delay_slot(regs)) {
  943. err = SIGILL;
  944. break;
  945. }
  946. regs->regs[31] = r31;
  947. regs->cp0_epc = epc;
  948. err = __compute_return_epc(regs);
  949. if (err < 0)
  950. return SIGEMT;
  951. if (err != BRANCH_LIKELY_TAKEN)
  952. break;
  953. cpc = regs->cp0_epc;
  954. nepc = epc + 4;
  955. err = __get_user(nir, (u32 __user *)nepc);
  956. if (err) {
  957. err = SIGSEGV;
  958. break;
  959. }
  960. /*
  961. * This will probably be optimized away when
  962. * CONFIG_DEBUG_FS is not enabled
  963. */
  964. switch (MIPSInst_OPCODE(inst)) {
  965. case beql_op:
  966. MIPS_R2BR_STATS(beql);
  967. break;
  968. case bnel_op:
  969. MIPS_R2BR_STATS(bnel);
  970. break;
  971. case blezl_op:
  972. MIPS_R2BR_STATS(blezl);
  973. break;
  974. case bgtzl_op:
  975. MIPS_R2BR_STATS(bgtzl);
  976. break;
  977. }
  978. switch (MIPSInst_OPCODE(nir)) {
  979. case cop1_op:
  980. case cop1x_op:
  981. case lwc1_op:
  982. case swc1_op:
  983. regs->cp0_cause |= CAUSEF_BD;
  984. goto fpu_emul;
  985. }
  986. if (nir) {
  987. err = mipsr6_emul(regs, nir);
  988. if (err > 0) {
  989. err = mips_dsemul(regs, nir, epc, cpc);
  990. if (err == SIGILL)
  991. err = SIGEMT;
  992. MIPS_R2_STATS(dsemul);
  993. }
  994. }
  995. break;
  996. case lwc1_op:
  997. case swc1_op:
  998. case cop1_op:
  999. case cop1x_op:
  1000. fpu_emul:
  1001. regs->regs[31] = r31;
  1002. regs->cp0_epc = epc;
  1003. err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
  1004. &fault_addr);
  1005. /*
  1006. * We can't allow the emulated instruction to leave any
  1007. * enabled Cause bits set in $fcr31.
  1008. */
  1009. *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
  1010. current->thread.fpu.fcr31 &= ~res;
  1011. /*
  1012. * this is a tricky issue - lose_fpu() uses LL/SC atomics
  1013. * if FPU is owned and effectively cancels user level LL/SC.
  1014. * So, it could be logical to don't restore FPU ownership here.
  1015. * But the sequence of multiple FPU instructions is much much
  1016. * more often than LL-FPU-SC and I prefer loop here until
  1017. * next scheduler cycle cancels FPU ownership
  1018. */
  1019. own_fpu(1); /* Restore FPU state. */
  1020. if (err)
  1021. current->thread.cp0_baduaddr = (unsigned long)fault_addr;
  1022. MIPS_R2_STATS(fpus);
  1023. break;
  1024. case lwl_op:
  1025. rt = regs->regs[MIPSInst_RT(inst)];
  1026. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1027. if (!access_ok((void __user *)vaddr, 4)) {
  1028. current->thread.cp0_baduaddr = vaddr;
  1029. err = SIGSEGV;
  1030. break;
  1031. }
  1032. __asm__ __volatile__(
  1033. " .set push\n"
  1034. " .set reorder\n"
  1035. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1036. "1:" LB "%1, 0(%2)\n"
  1037. INS "%0, %1, 24, 8\n"
  1038. " andi %1, %2, 0x3\n"
  1039. " beq $0, %1, 9f\n"
  1040. ADDIU "%2, %2, -1\n"
  1041. "2:" LB "%1, 0(%2)\n"
  1042. INS "%0, %1, 16, 8\n"
  1043. " andi %1, %2, 0x3\n"
  1044. " beq $0, %1, 9f\n"
  1045. ADDIU "%2, %2, -1\n"
  1046. "3:" LB "%1, 0(%2)\n"
  1047. INS "%0, %1, 8, 8\n"
  1048. " andi %1, %2, 0x3\n"
  1049. " beq $0, %1, 9f\n"
  1050. ADDIU "%2, %2, -1\n"
  1051. "4:" LB "%1, 0(%2)\n"
  1052. INS "%0, %1, 0, 8\n"
  1053. #else /* !CONFIG_CPU_LITTLE_ENDIAN */
  1054. "1:" LB "%1, 0(%2)\n"
  1055. INS "%0, %1, 24, 8\n"
  1056. ADDIU "%2, %2, 1\n"
  1057. " andi %1, %2, 0x3\n"
  1058. " beq $0, %1, 9f\n"
  1059. "2:" LB "%1, 0(%2)\n"
  1060. INS "%0, %1, 16, 8\n"
  1061. ADDIU "%2, %2, 1\n"
  1062. " andi %1, %2, 0x3\n"
  1063. " beq $0, %1, 9f\n"
  1064. "3:" LB "%1, 0(%2)\n"
  1065. INS "%0, %1, 8, 8\n"
  1066. ADDIU "%2, %2, 1\n"
  1067. " andi %1, %2, 0x3\n"
  1068. " beq $0, %1, 9f\n"
  1069. "4:" LB "%1, 0(%2)\n"
  1070. INS "%0, %1, 0, 8\n"
  1071. #endif /* CONFIG_CPU_LITTLE_ENDIAN */
  1072. "9: sll %0, %0, 0\n"
  1073. "10:\n"
  1074. " .insn\n"
  1075. " .section .fixup,\"ax\"\n"
  1076. "8: li %3,%4\n"
  1077. " j 10b\n"
  1078. " .previous\n"
  1079. " .section __ex_table,\"a\"\n"
  1080. STR(PTR_WD) " 1b,8b\n"
  1081. STR(PTR_WD) " 2b,8b\n"
  1082. STR(PTR_WD) " 3b,8b\n"
  1083. STR(PTR_WD) " 4b,8b\n"
  1084. " .previous\n"
  1085. " .set pop\n"
  1086. : "+&r"(rt), "=&r"(rs),
  1087. "+&r"(vaddr), "+&r"(err)
  1088. : "i"(SIGSEGV));
  1089. if (MIPSInst_RT(inst) && !err)
  1090. regs->regs[MIPSInst_RT(inst)] = rt;
  1091. MIPS_R2_STATS(loads);
  1092. break;
  1093. case lwr_op:
  1094. rt = regs->regs[MIPSInst_RT(inst)];
  1095. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1096. if (!access_ok((void __user *)vaddr, 4)) {
  1097. current->thread.cp0_baduaddr = vaddr;
  1098. err = SIGSEGV;
  1099. break;
  1100. }
  1101. __asm__ __volatile__(
  1102. " .set push\n"
  1103. " .set reorder\n"
  1104. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1105. "1:" LB "%1, 0(%2)\n"
  1106. INS "%0, %1, 0, 8\n"
  1107. ADDIU "%2, %2, 1\n"
  1108. " andi %1, %2, 0x3\n"
  1109. " beq $0, %1, 9f\n"
  1110. "2:" LB "%1, 0(%2)\n"
  1111. INS "%0, %1, 8, 8\n"
  1112. ADDIU "%2, %2, 1\n"
  1113. " andi %1, %2, 0x3\n"
  1114. " beq $0, %1, 9f\n"
  1115. "3:" LB "%1, 0(%2)\n"
  1116. INS "%0, %1, 16, 8\n"
  1117. ADDIU "%2, %2, 1\n"
  1118. " andi %1, %2, 0x3\n"
  1119. " beq $0, %1, 9f\n"
  1120. "4:" LB "%1, 0(%2)\n"
  1121. INS "%0, %1, 24, 8\n"
  1122. " sll %0, %0, 0\n"
  1123. #else /* !CONFIG_CPU_LITTLE_ENDIAN */
  1124. "1:" LB "%1, 0(%2)\n"
  1125. INS "%0, %1, 0, 8\n"
  1126. " andi %1, %2, 0x3\n"
  1127. " beq $0, %1, 9f\n"
  1128. ADDIU "%2, %2, -1\n"
  1129. "2:" LB "%1, 0(%2)\n"
  1130. INS "%0, %1, 8, 8\n"
  1131. " andi %1, %2, 0x3\n"
  1132. " beq $0, %1, 9f\n"
  1133. ADDIU "%2, %2, -1\n"
  1134. "3:" LB "%1, 0(%2)\n"
  1135. INS "%0, %1, 16, 8\n"
  1136. " andi %1, %2, 0x3\n"
  1137. " beq $0, %1, 9f\n"
  1138. ADDIU "%2, %2, -1\n"
  1139. "4:" LB "%1, 0(%2)\n"
  1140. INS "%0, %1, 24, 8\n"
  1141. " sll %0, %0, 0\n"
  1142. #endif /* CONFIG_CPU_LITTLE_ENDIAN */
  1143. "9:\n"
  1144. "10:\n"
  1145. " .insn\n"
  1146. " .section .fixup,\"ax\"\n"
  1147. "8: li %3,%4\n"
  1148. " j 10b\n"
  1149. " .previous\n"
  1150. " .section __ex_table,\"a\"\n"
  1151. STR(PTR_WD) " 1b,8b\n"
  1152. STR(PTR_WD) " 2b,8b\n"
  1153. STR(PTR_WD) " 3b,8b\n"
  1154. STR(PTR_WD) " 4b,8b\n"
  1155. " .previous\n"
  1156. " .set pop\n"
  1157. : "+&r"(rt), "=&r"(rs),
  1158. "+&r"(vaddr), "+&r"(err)
  1159. : "i"(SIGSEGV));
  1160. if (MIPSInst_RT(inst) && !err)
  1161. regs->regs[MIPSInst_RT(inst)] = rt;
  1162. MIPS_R2_STATS(loads);
  1163. break;
  1164. case swl_op:
  1165. rt = regs->regs[MIPSInst_RT(inst)];
  1166. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1167. if (!access_ok((void __user *)vaddr, 4)) {
  1168. current->thread.cp0_baduaddr = vaddr;
  1169. err = SIGSEGV;
  1170. break;
  1171. }
  1172. __asm__ __volatile__(
  1173. " .set push\n"
  1174. " .set reorder\n"
  1175. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1176. EXT "%1, %0, 24, 8\n"
  1177. "1:" SB "%1, 0(%2)\n"
  1178. " andi %1, %2, 0x3\n"
  1179. " beq $0, %1, 9f\n"
  1180. ADDIU "%2, %2, -1\n"
  1181. EXT "%1, %0, 16, 8\n"
  1182. "2:" SB "%1, 0(%2)\n"
  1183. " andi %1, %2, 0x3\n"
  1184. " beq $0, %1, 9f\n"
  1185. ADDIU "%2, %2, -1\n"
  1186. EXT "%1, %0, 8, 8\n"
  1187. "3:" SB "%1, 0(%2)\n"
  1188. " andi %1, %2, 0x3\n"
  1189. " beq $0, %1, 9f\n"
  1190. ADDIU "%2, %2, -1\n"
  1191. EXT "%1, %0, 0, 8\n"
  1192. "4:" SB "%1, 0(%2)\n"
  1193. #else /* !CONFIG_CPU_LITTLE_ENDIAN */
  1194. EXT "%1, %0, 24, 8\n"
  1195. "1:" SB "%1, 0(%2)\n"
  1196. ADDIU "%2, %2, 1\n"
  1197. " andi %1, %2, 0x3\n"
  1198. " beq $0, %1, 9f\n"
  1199. EXT "%1, %0, 16, 8\n"
  1200. "2:" SB "%1, 0(%2)\n"
  1201. ADDIU "%2, %2, 1\n"
  1202. " andi %1, %2, 0x3\n"
  1203. " beq $0, %1, 9f\n"
  1204. EXT "%1, %0, 8, 8\n"
  1205. "3:" SB "%1, 0(%2)\n"
  1206. ADDIU "%2, %2, 1\n"
  1207. " andi %1, %2, 0x3\n"
  1208. " beq $0, %1, 9f\n"
  1209. EXT "%1, %0, 0, 8\n"
  1210. "4:" SB "%1, 0(%2)\n"
  1211. #endif /* CONFIG_CPU_LITTLE_ENDIAN */
  1212. "9:\n"
  1213. " .insn\n"
  1214. " .section .fixup,\"ax\"\n"
  1215. "8: li %3,%4\n"
  1216. " j 9b\n"
  1217. " .previous\n"
  1218. " .section __ex_table,\"a\"\n"
  1219. STR(PTR_WD) " 1b,8b\n"
  1220. STR(PTR_WD) " 2b,8b\n"
  1221. STR(PTR_WD) " 3b,8b\n"
  1222. STR(PTR_WD) " 4b,8b\n"
  1223. " .previous\n"
  1224. " .set pop\n"
  1225. : "+&r"(rt), "=&r"(rs),
  1226. "+&r"(vaddr), "+&r"(err)
  1227. : "i"(SIGSEGV)
  1228. : "memory");
  1229. MIPS_R2_STATS(stores);
  1230. break;
  1231. case swr_op:
  1232. rt = regs->regs[MIPSInst_RT(inst)];
  1233. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1234. if (!access_ok((void __user *)vaddr, 4)) {
  1235. current->thread.cp0_baduaddr = vaddr;
  1236. err = SIGSEGV;
  1237. break;
  1238. }
  1239. __asm__ __volatile__(
  1240. " .set push\n"
  1241. " .set reorder\n"
  1242. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1243. EXT "%1, %0, 0, 8\n"
  1244. "1:" SB "%1, 0(%2)\n"
  1245. ADDIU "%2, %2, 1\n"
  1246. " andi %1, %2, 0x3\n"
  1247. " beq $0, %1, 9f\n"
  1248. EXT "%1, %0, 8, 8\n"
  1249. "2:" SB "%1, 0(%2)\n"
  1250. ADDIU "%2, %2, 1\n"
  1251. " andi %1, %2, 0x3\n"
  1252. " beq $0, %1, 9f\n"
  1253. EXT "%1, %0, 16, 8\n"
  1254. "3:" SB "%1, 0(%2)\n"
  1255. ADDIU "%2, %2, 1\n"
  1256. " andi %1, %2, 0x3\n"
  1257. " beq $0, %1, 9f\n"
  1258. EXT "%1, %0, 24, 8\n"
  1259. "4:" SB "%1, 0(%2)\n"
  1260. #else /* !CONFIG_CPU_LITTLE_ENDIAN */
  1261. EXT "%1, %0, 0, 8\n"
  1262. "1:" SB "%1, 0(%2)\n"
  1263. " andi %1, %2, 0x3\n"
  1264. " beq $0, %1, 9f\n"
  1265. ADDIU "%2, %2, -1\n"
  1266. EXT "%1, %0, 8, 8\n"
  1267. "2:" SB "%1, 0(%2)\n"
  1268. " andi %1, %2, 0x3\n"
  1269. " beq $0, %1, 9f\n"
  1270. ADDIU "%2, %2, -1\n"
  1271. EXT "%1, %0, 16, 8\n"
  1272. "3:" SB "%1, 0(%2)\n"
  1273. " andi %1, %2, 0x3\n"
  1274. " beq $0, %1, 9f\n"
  1275. ADDIU "%2, %2, -1\n"
  1276. EXT "%1, %0, 24, 8\n"
  1277. "4:" SB "%1, 0(%2)\n"
  1278. #endif /* CONFIG_CPU_LITTLE_ENDIAN */
  1279. "9:\n"
  1280. " .insn\n"
  1281. " .section .fixup,\"ax\"\n"
  1282. "8: li %3,%4\n"
  1283. " j 9b\n"
  1284. " .previous\n"
  1285. " .section __ex_table,\"a\"\n"
  1286. STR(PTR_WD) " 1b,8b\n"
  1287. STR(PTR_WD) " 2b,8b\n"
  1288. STR(PTR_WD) " 3b,8b\n"
  1289. STR(PTR_WD) " 4b,8b\n"
  1290. " .previous\n"
  1291. " .set pop\n"
  1292. : "+&r"(rt), "=&r"(rs),
  1293. "+&r"(vaddr), "+&r"(err)
  1294. : "i"(SIGSEGV)
  1295. : "memory");
  1296. MIPS_R2_STATS(stores);
  1297. break;
  1298. case ldl_op:
  1299. if (IS_ENABLED(CONFIG_32BIT)) {
  1300. err = SIGILL;
  1301. break;
  1302. }
  1303. rt = regs->regs[MIPSInst_RT(inst)];
  1304. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1305. if (!access_ok((void __user *)vaddr, 8)) {
  1306. current->thread.cp0_baduaddr = vaddr;
  1307. err = SIGSEGV;
  1308. break;
  1309. }
  1310. __asm__ __volatile__(
  1311. " .set push\n"
  1312. " .set reorder\n"
  1313. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1314. "1: lb %1, 0(%2)\n"
  1315. " dinsu %0, %1, 56, 8\n"
  1316. " andi %1, %2, 0x7\n"
  1317. " beq $0, %1, 9f\n"
  1318. " daddiu %2, %2, -1\n"
  1319. "2: lb %1, 0(%2)\n"
  1320. " dinsu %0, %1, 48, 8\n"
  1321. " andi %1, %2, 0x7\n"
  1322. " beq $0, %1, 9f\n"
  1323. " daddiu %2, %2, -1\n"
  1324. "3: lb %1, 0(%2)\n"
  1325. " dinsu %0, %1, 40, 8\n"
  1326. " andi %1, %2, 0x7\n"
  1327. " beq $0, %1, 9f\n"
  1328. " daddiu %2, %2, -1\n"
  1329. "4: lb %1, 0(%2)\n"
  1330. " dinsu %0, %1, 32, 8\n"
  1331. " andi %1, %2, 0x7\n"
  1332. " beq $0, %1, 9f\n"
  1333. " daddiu %2, %2, -1\n"
  1334. "5: lb %1, 0(%2)\n"
  1335. " dins %0, %1, 24, 8\n"
  1336. " andi %1, %2, 0x7\n"
  1337. " beq $0, %1, 9f\n"
  1338. " daddiu %2, %2, -1\n"
  1339. "6: lb %1, 0(%2)\n"
  1340. " dins %0, %1, 16, 8\n"
  1341. " andi %1, %2, 0x7\n"
  1342. " beq $0, %1, 9f\n"
  1343. " daddiu %2, %2, -1\n"
  1344. "7: lb %1, 0(%2)\n"
  1345. " dins %0, %1, 8, 8\n"
  1346. " andi %1, %2, 0x7\n"
  1347. " beq $0, %1, 9f\n"
  1348. " daddiu %2, %2, -1\n"
  1349. "0: lb %1, 0(%2)\n"
  1350. " dins %0, %1, 0, 8\n"
  1351. #else /* !CONFIG_CPU_LITTLE_ENDIAN */
  1352. "1: lb %1, 0(%2)\n"
  1353. " dinsu %0, %1, 56, 8\n"
  1354. " daddiu %2, %2, 1\n"
  1355. " andi %1, %2, 0x7\n"
  1356. " beq $0, %1, 9f\n"
  1357. "2: lb %1, 0(%2)\n"
  1358. " dinsu %0, %1, 48, 8\n"
  1359. " daddiu %2, %2, 1\n"
  1360. " andi %1, %2, 0x7\n"
  1361. " beq $0, %1, 9f\n"
  1362. "3: lb %1, 0(%2)\n"
  1363. " dinsu %0, %1, 40, 8\n"
  1364. " daddiu %2, %2, 1\n"
  1365. " andi %1, %2, 0x7\n"
  1366. " beq $0, %1, 9f\n"
  1367. "4: lb %1, 0(%2)\n"
  1368. " dinsu %0, %1, 32, 8\n"
  1369. " daddiu %2, %2, 1\n"
  1370. " andi %1, %2, 0x7\n"
  1371. " beq $0, %1, 9f\n"
  1372. "5: lb %1, 0(%2)\n"
  1373. " dins %0, %1, 24, 8\n"
  1374. " daddiu %2, %2, 1\n"
  1375. " andi %1, %2, 0x7\n"
  1376. " beq $0, %1, 9f\n"
  1377. "6: lb %1, 0(%2)\n"
  1378. " dins %0, %1, 16, 8\n"
  1379. " daddiu %2, %2, 1\n"
  1380. " andi %1, %2, 0x7\n"
  1381. " beq $0, %1, 9f\n"
  1382. "7: lb %1, 0(%2)\n"
  1383. " dins %0, %1, 8, 8\n"
  1384. " daddiu %2, %2, 1\n"
  1385. " andi %1, %2, 0x7\n"
  1386. " beq $0, %1, 9f\n"
  1387. "0: lb %1, 0(%2)\n"
  1388. " dins %0, %1, 0, 8\n"
  1389. #endif /* CONFIG_CPU_LITTLE_ENDIAN */
  1390. "9:\n"
  1391. " .insn\n"
  1392. " .section .fixup,\"ax\"\n"
  1393. "8: li %3,%4\n"
  1394. " j 9b\n"
  1395. " .previous\n"
  1396. " .section __ex_table,\"a\"\n"
  1397. STR(PTR_WD) " 1b,8b\n"
  1398. STR(PTR_WD) " 2b,8b\n"
  1399. STR(PTR_WD) " 3b,8b\n"
  1400. STR(PTR_WD) " 4b,8b\n"
  1401. STR(PTR_WD) " 5b,8b\n"
  1402. STR(PTR_WD) " 6b,8b\n"
  1403. STR(PTR_WD) " 7b,8b\n"
  1404. STR(PTR_WD) " 0b,8b\n"
  1405. " .previous\n"
  1406. " .set pop\n"
  1407. : "+&r"(rt), "=&r"(rs),
  1408. "+&r"(vaddr), "+&r"(err)
  1409. : "i"(SIGSEGV));
  1410. if (MIPSInst_RT(inst) && !err)
  1411. regs->regs[MIPSInst_RT(inst)] = rt;
  1412. MIPS_R2_STATS(loads);
  1413. break;
  1414. case ldr_op:
  1415. if (IS_ENABLED(CONFIG_32BIT)) {
  1416. err = SIGILL;
  1417. break;
  1418. }
  1419. rt = regs->regs[MIPSInst_RT(inst)];
  1420. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1421. if (!access_ok((void __user *)vaddr, 8)) {
  1422. current->thread.cp0_baduaddr = vaddr;
  1423. err = SIGSEGV;
  1424. break;
  1425. }
  1426. __asm__ __volatile__(
  1427. " .set push\n"
  1428. " .set reorder\n"
  1429. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1430. "1: lb %1, 0(%2)\n"
  1431. " dins %0, %1, 0, 8\n"
  1432. " daddiu %2, %2, 1\n"
  1433. " andi %1, %2, 0x7\n"
  1434. " beq $0, %1, 9f\n"
  1435. "2: lb %1, 0(%2)\n"
  1436. " dins %0, %1, 8, 8\n"
  1437. " daddiu %2, %2, 1\n"
  1438. " andi %1, %2, 0x7\n"
  1439. " beq $0, %1, 9f\n"
  1440. "3: lb %1, 0(%2)\n"
  1441. " dins %0, %1, 16, 8\n"
  1442. " daddiu %2, %2, 1\n"
  1443. " andi %1, %2, 0x7\n"
  1444. " beq $0, %1, 9f\n"
  1445. "4: lb %1, 0(%2)\n"
  1446. " dins %0, %1, 24, 8\n"
  1447. " daddiu %2, %2, 1\n"
  1448. " andi %1, %2, 0x7\n"
  1449. " beq $0, %1, 9f\n"
  1450. "5: lb %1, 0(%2)\n"
  1451. " dinsu %0, %1, 32, 8\n"
  1452. " daddiu %2, %2, 1\n"
  1453. " andi %1, %2, 0x7\n"
  1454. " beq $0, %1, 9f\n"
  1455. "6: lb %1, 0(%2)\n"
  1456. " dinsu %0, %1, 40, 8\n"
  1457. " daddiu %2, %2, 1\n"
  1458. " andi %1, %2, 0x7\n"
  1459. " beq $0, %1, 9f\n"
  1460. "7: lb %1, 0(%2)\n"
  1461. " dinsu %0, %1, 48, 8\n"
  1462. " daddiu %2, %2, 1\n"
  1463. " andi %1, %2, 0x7\n"
  1464. " beq $0, %1, 9f\n"
  1465. "0: lb %1, 0(%2)\n"
  1466. " dinsu %0, %1, 56, 8\n"
  1467. #else /* !CONFIG_CPU_LITTLE_ENDIAN */
  1468. "1: lb %1, 0(%2)\n"
  1469. " dins %0, %1, 0, 8\n"
  1470. " andi %1, %2, 0x7\n"
  1471. " beq $0, %1, 9f\n"
  1472. " daddiu %2, %2, -1\n"
  1473. "2: lb %1, 0(%2)\n"
  1474. " dins %0, %1, 8, 8\n"
  1475. " andi %1, %2, 0x7\n"
  1476. " beq $0, %1, 9f\n"
  1477. " daddiu %2, %2, -1\n"
  1478. "3: lb %1, 0(%2)\n"
  1479. " dins %0, %1, 16, 8\n"
  1480. " andi %1, %2, 0x7\n"
  1481. " beq $0, %1, 9f\n"
  1482. " daddiu %2, %2, -1\n"
  1483. "4: lb %1, 0(%2)\n"
  1484. " dins %0, %1, 24, 8\n"
  1485. " andi %1, %2, 0x7\n"
  1486. " beq $0, %1, 9f\n"
  1487. " daddiu %2, %2, -1\n"
  1488. "5: lb %1, 0(%2)\n"
  1489. " dinsu %0, %1, 32, 8\n"
  1490. " andi %1, %2, 0x7\n"
  1491. " beq $0, %1, 9f\n"
  1492. " daddiu %2, %2, -1\n"
  1493. "6: lb %1, 0(%2)\n"
  1494. " dinsu %0, %1, 40, 8\n"
  1495. " andi %1, %2, 0x7\n"
  1496. " beq $0, %1, 9f\n"
  1497. " daddiu %2, %2, -1\n"
  1498. "7: lb %1, 0(%2)\n"
  1499. " dinsu %0, %1, 48, 8\n"
  1500. " andi %1, %2, 0x7\n"
  1501. " beq $0, %1, 9f\n"
  1502. " daddiu %2, %2, -1\n"
  1503. "0: lb %1, 0(%2)\n"
  1504. " dinsu %0, %1, 56, 8\n"
  1505. #endif /* CONFIG_CPU_LITTLE_ENDIAN */
  1506. "9:\n"
  1507. " .insn\n"
  1508. " .section .fixup,\"ax\"\n"
  1509. "8: li %3,%4\n"
  1510. " j 9b\n"
  1511. " .previous\n"
  1512. " .section __ex_table,\"a\"\n"
  1513. STR(PTR_WD) " 1b,8b\n"
  1514. STR(PTR_WD) " 2b,8b\n"
  1515. STR(PTR_WD) " 3b,8b\n"
  1516. STR(PTR_WD) " 4b,8b\n"
  1517. STR(PTR_WD) " 5b,8b\n"
  1518. STR(PTR_WD) " 6b,8b\n"
  1519. STR(PTR_WD) " 7b,8b\n"
  1520. STR(PTR_WD) " 0b,8b\n"
  1521. " .previous\n"
  1522. " .set pop\n"
  1523. : "+&r"(rt), "=&r"(rs),
  1524. "+&r"(vaddr), "+&r"(err)
  1525. : "i"(SIGSEGV));
  1526. if (MIPSInst_RT(inst) && !err)
  1527. regs->regs[MIPSInst_RT(inst)] = rt;
  1528. MIPS_R2_STATS(loads);
  1529. break;
  1530. case sdl_op:
  1531. if (IS_ENABLED(CONFIG_32BIT)) {
  1532. err = SIGILL;
  1533. break;
  1534. }
  1535. rt = regs->regs[MIPSInst_RT(inst)];
  1536. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1537. if (!access_ok((void __user *)vaddr, 8)) {
  1538. current->thread.cp0_baduaddr = vaddr;
  1539. err = SIGSEGV;
  1540. break;
  1541. }
  1542. __asm__ __volatile__(
  1543. " .set push\n"
  1544. " .set reorder\n"
  1545. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1546. " dextu %1, %0, 56, 8\n"
  1547. "1: sb %1, 0(%2)\n"
  1548. " andi %1, %2, 0x7\n"
  1549. " beq $0, %1, 9f\n"
  1550. " daddiu %2, %2, -1\n"
  1551. " dextu %1, %0, 48, 8\n"
  1552. "2: sb %1, 0(%2)\n"
  1553. " andi %1, %2, 0x7\n"
  1554. " beq $0, %1, 9f\n"
  1555. " daddiu %2, %2, -1\n"
  1556. " dextu %1, %0, 40, 8\n"
  1557. "3: sb %1, 0(%2)\n"
  1558. " andi %1, %2, 0x7\n"
  1559. " beq $0, %1, 9f\n"
  1560. " daddiu %2, %2, -1\n"
  1561. " dextu %1, %0, 32, 8\n"
  1562. "4: sb %1, 0(%2)\n"
  1563. " andi %1, %2, 0x7\n"
  1564. " beq $0, %1, 9f\n"
  1565. " daddiu %2, %2, -1\n"
  1566. " dext %1, %0, 24, 8\n"
  1567. "5: sb %1, 0(%2)\n"
  1568. " andi %1, %2, 0x7\n"
  1569. " beq $0, %1, 9f\n"
  1570. " daddiu %2, %2, -1\n"
  1571. " dext %1, %0, 16, 8\n"
  1572. "6: sb %1, 0(%2)\n"
  1573. " andi %1, %2, 0x7\n"
  1574. " beq $0, %1, 9f\n"
  1575. " daddiu %2, %2, -1\n"
  1576. " dext %1, %0, 8, 8\n"
  1577. "7: sb %1, 0(%2)\n"
  1578. " andi %1, %2, 0x7\n"
  1579. " beq $0, %1, 9f\n"
  1580. " daddiu %2, %2, -1\n"
  1581. " dext %1, %0, 0, 8\n"
  1582. "0: sb %1, 0(%2)\n"
  1583. #else /* !CONFIG_CPU_LITTLE_ENDIAN */
  1584. " dextu %1, %0, 56, 8\n"
  1585. "1: sb %1, 0(%2)\n"
  1586. " daddiu %2, %2, 1\n"
  1587. " andi %1, %2, 0x7\n"
  1588. " beq $0, %1, 9f\n"
  1589. " dextu %1, %0, 48, 8\n"
  1590. "2: sb %1, 0(%2)\n"
  1591. " daddiu %2, %2, 1\n"
  1592. " andi %1, %2, 0x7\n"
  1593. " beq $0, %1, 9f\n"
  1594. " dextu %1, %0, 40, 8\n"
  1595. "3: sb %1, 0(%2)\n"
  1596. " daddiu %2, %2, 1\n"
  1597. " andi %1, %2, 0x7\n"
  1598. " beq $0, %1, 9f\n"
  1599. " dextu %1, %0, 32, 8\n"
  1600. "4: sb %1, 0(%2)\n"
  1601. " daddiu %2, %2, 1\n"
  1602. " andi %1, %2, 0x7\n"
  1603. " beq $0, %1, 9f\n"
  1604. " dext %1, %0, 24, 8\n"
  1605. "5: sb %1, 0(%2)\n"
  1606. " daddiu %2, %2, 1\n"
  1607. " andi %1, %2, 0x7\n"
  1608. " beq $0, %1, 9f\n"
  1609. " dext %1, %0, 16, 8\n"
  1610. "6: sb %1, 0(%2)\n"
  1611. " daddiu %2, %2, 1\n"
  1612. " andi %1, %2, 0x7\n"
  1613. " beq $0, %1, 9f\n"
  1614. " dext %1, %0, 8, 8\n"
  1615. "7: sb %1, 0(%2)\n"
  1616. " daddiu %2, %2, 1\n"
  1617. " andi %1, %2, 0x7\n"
  1618. " beq $0, %1, 9f\n"
  1619. " dext %1, %0, 0, 8\n"
  1620. "0: sb %1, 0(%2)\n"
  1621. #endif /* CONFIG_CPU_LITTLE_ENDIAN */
  1622. "9:\n"
  1623. " .insn\n"
  1624. " .section .fixup,\"ax\"\n"
  1625. "8: li %3,%4\n"
  1626. " j 9b\n"
  1627. " .previous\n"
  1628. " .section __ex_table,\"a\"\n"
  1629. STR(PTR_WD) " 1b,8b\n"
  1630. STR(PTR_WD) " 2b,8b\n"
  1631. STR(PTR_WD) " 3b,8b\n"
  1632. STR(PTR_WD) " 4b,8b\n"
  1633. STR(PTR_WD) " 5b,8b\n"
  1634. STR(PTR_WD) " 6b,8b\n"
  1635. STR(PTR_WD) " 7b,8b\n"
  1636. STR(PTR_WD) " 0b,8b\n"
  1637. " .previous\n"
  1638. " .set pop\n"
  1639. : "+&r"(rt), "=&r"(rs),
  1640. "+&r"(vaddr), "+&r"(err)
  1641. : "i"(SIGSEGV)
  1642. : "memory");
  1643. MIPS_R2_STATS(stores);
  1644. break;
  1645. case sdr_op:
  1646. if (IS_ENABLED(CONFIG_32BIT)) {
  1647. err = SIGILL;
  1648. break;
  1649. }
  1650. rt = regs->regs[MIPSInst_RT(inst)];
  1651. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1652. if (!access_ok((void __user *)vaddr, 8)) {
  1653. current->thread.cp0_baduaddr = vaddr;
  1654. err = SIGSEGV;
  1655. break;
  1656. }
  1657. __asm__ __volatile__(
  1658. " .set push\n"
  1659. " .set reorder\n"
  1660. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1661. " dext %1, %0, 0, 8\n"
  1662. "1: sb %1, 0(%2)\n"
  1663. " daddiu %2, %2, 1\n"
  1664. " andi %1, %2, 0x7\n"
  1665. " beq $0, %1, 9f\n"
  1666. " dext %1, %0, 8, 8\n"
  1667. "2: sb %1, 0(%2)\n"
  1668. " daddiu %2, %2, 1\n"
  1669. " andi %1, %2, 0x7\n"
  1670. " beq $0, %1, 9f\n"
  1671. " dext %1, %0, 16, 8\n"
  1672. "3: sb %1, 0(%2)\n"
  1673. " daddiu %2, %2, 1\n"
  1674. " andi %1, %2, 0x7\n"
  1675. " beq $0, %1, 9f\n"
  1676. " dext %1, %0, 24, 8\n"
  1677. "4: sb %1, 0(%2)\n"
  1678. " daddiu %2, %2, 1\n"
  1679. " andi %1, %2, 0x7\n"
  1680. " beq $0, %1, 9f\n"
  1681. " dextu %1, %0, 32, 8\n"
  1682. "5: sb %1, 0(%2)\n"
  1683. " daddiu %2, %2, 1\n"
  1684. " andi %1, %2, 0x7\n"
  1685. " beq $0, %1, 9f\n"
  1686. " dextu %1, %0, 40, 8\n"
  1687. "6: sb %1, 0(%2)\n"
  1688. " daddiu %2, %2, 1\n"
  1689. " andi %1, %2, 0x7\n"
  1690. " beq $0, %1, 9f\n"
  1691. " dextu %1, %0, 48, 8\n"
  1692. "7: sb %1, 0(%2)\n"
  1693. " daddiu %2, %2, 1\n"
  1694. " andi %1, %2, 0x7\n"
  1695. " beq $0, %1, 9f\n"
  1696. " dextu %1, %0, 56, 8\n"
  1697. "0: sb %1, 0(%2)\n"
  1698. #else /* !CONFIG_CPU_LITTLE_ENDIAN */
  1699. " dext %1, %0, 0, 8\n"
  1700. "1: sb %1, 0(%2)\n"
  1701. " andi %1, %2, 0x7\n"
  1702. " beq $0, %1, 9f\n"
  1703. " daddiu %2, %2, -1\n"
  1704. " dext %1, %0, 8, 8\n"
  1705. "2: sb %1, 0(%2)\n"
  1706. " andi %1, %2, 0x7\n"
  1707. " beq $0, %1, 9f\n"
  1708. " daddiu %2, %2, -1\n"
  1709. " dext %1, %0, 16, 8\n"
  1710. "3: sb %1, 0(%2)\n"
  1711. " andi %1, %2, 0x7\n"
  1712. " beq $0, %1, 9f\n"
  1713. " daddiu %2, %2, -1\n"
  1714. " dext %1, %0, 24, 8\n"
  1715. "4: sb %1, 0(%2)\n"
  1716. " andi %1, %2, 0x7\n"
  1717. " beq $0, %1, 9f\n"
  1718. " daddiu %2, %2, -1\n"
  1719. " dextu %1, %0, 32, 8\n"
  1720. "5: sb %1, 0(%2)\n"
  1721. " andi %1, %2, 0x7\n"
  1722. " beq $0, %1, 9f\n"
  1723. " daddiu %2, %2, -1\n"
  1724. " dextu %1, %0, 40, 8\n"
  1725. "6: sb %1, 0(%2)\n"
  1726. " andi %1, %2, 0x7\n"
  1727. " beq $0, %1, 9f\n"
  1728. " daddiu %2, %2, -1\n"
  1729. " dextu %1, %0, 48, 8\n"
  1730. "7: sb %1, 0(%2)\n"
  1731. " andi %1, %2, 0x7\n"
  1732. " beq $0, %1, 9f\n"
  1733. " daddiu %2, %2, -1\n"
  1734. " dextu %1, %0, 56, 8\n"
  1735. "0: sb %1, 0(%2)\n"
  1736. #endif /* CONFIG_CPU_LITTLE_ENDIAN */
  1737. "9:\n"
  1738. " .insn\n"
  1739. " .section .fixup,\"ax\"\n"
  1740. "8: li %3,%4\n"
  1741. " j 9b\n"
  1742. " .previous\n"
  1743. " .section __ex_table,\"a\"\n"
  1744. STR(PTR_WD) " 1b,8b\n"
  1745. STR(PTR_WD) " 2b,8b\n"
  1746. STR(PTR_WD) " 3b,8b\n"
  1747. STR(PTR_WD) " 4b,8b\n"
  1748. STR(PTR_WD) " 5b,8b\n"
  1749. STR(PTR_WD) " 6b,8b\n"
  1750. STR(PTR_WD) " 7b,8b\n"
  1751. STR(PTR_WD) " 0b,8b\n"
  1752. " .previous\n"
  1753. " .set pop\n"
  1754. : "+&r"(rt), "=&r"(rs),
  1755. "+&r"(vaddr), "+&r"(err)
  1756. : "i"(SIGSEGV)
  1757. : "memory");
  1758. MIPS_R2_STATS(stores);
  1759. break;
  1760. case ll_op:
  1761. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1762. if (vaddr & 0x3) {
  1763. current->thread.cp0_baduaddr = vaddr;
  1764. err = SIGBUS;
  1765. break;
  1766. }
  1767. if (!access_ok((void __user *)vaddr, 4)) {
  1768. current->thread.cp0_baduaddr = vaddr;
  1769. err = SIGBUS;
  1770. break;
  1771. }
  1772. if (!cpu_has_rw_llb) {
  1773. /*
  1774. * An LL/SC block can't be safely emulated without
  1775. * a Config5/LLB availability. So it's probably time to
  1776. * kill our process before things get any worse. This is
  1777. * because Config5/LLB allows us to use ERETNC so that
  1778. * the LLAddr/LLB bit is not cleared when we return from
  1779. * an exception. MIPS R2 LL/SC instructions trap with an
  1780. * RI exception so once we emulate them here, we return
  1781. * back to userland with ERETNC. That preserves the
  1782. * LLAddr/LLB so the subsequent SC instruction will
  1783. * succeed preserving the atomic semantics of the LL/SC
  1784. * block. Without that, there is no safe way to emulate
  1785. * an LL/SC block in MIPSR2 userland.
  1786. */
  1787. pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
  1788. err = SIGKILL;
  1789. break;
  1790. }
  1791. __asm__ __volatile__(
  1792. "1:\n"
  1793. "ll %0, 0(%2)\n"
  1794. "2:\n"
  1795. ".insn\n"
  1796. ".section .fixup,\"ax\"\n"
  1797. "3:\n"
  1798. "li %1, %3\n"
  1799. "j 2b\n"
  1800. ".previous\n"
  1801. ".section __ex_table,\"a\"\n"
  1802. STR(PTR_WD) " 1b,3b\n"
  1803. ".previous\n"
  1804. : "=&r"(res), "+&r"(err)
  1805. : "r"(vaddr), "i"(SIGSEGV)
  1806. : "memory");
  1807. if (MIPSInst_RT(inst) && !err)
  1808. regs->regs[MIPSInst_RT(inst)] = res;
  1809. MIPS_R2_STATS(llsc);
  1810. break;
  1811. case sc_op:
  1812. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1813. if (vaddr & 0x3) {
  1814. current->thread.cp0_baduaddr = vaddr;
  1815. err = SIGBUS;
  1816. break;
  1817. }
  1818. if (!access_ok((void __user *)vaddr, 4)) {
  1819. current->thread.cp0_baduaddr = vaddr;
  1820. err = SIGBUS;
  1821. break;
  1822. }
  1823. if (!cpu_has_rw_llb) {
  1824. /*
  1825. * An LL/SC block can't be safely emulated without
  1826. * a Config5/LLB availability. So it's probably time to
  1827. * kill our process before things get any worse. This is
  1828. * because Config5/LLB allows us to use ERETNC so that
  1829. * the LLAddr/LLB bit is not cleared when we return from
  1830. * an exception. MIPS R2 LL/SC instructions trap with an
  1831. * RI exception so once we emulate them here, we return
  1832. * back to userland with ERETNC. That preserves the
  1833. * LLAddr/LLB so the subsequent SC instruction will
  1834. * succeed preserving the atomic semantics of the LL/SC
  1835. * block. Without that, there is no safe way to emulate
  1836. * an LL/SC block in MIPSR2 userland.
  1837. */
  1838. pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
  1839. err = SIGKILL;
  1840. break;
  1841. }
  1842. res = regs->regs[MIPSInst_RT(inst)];
  1843. __asm__ __volatile__(
  1844. "1:\n"
  1845. "sc %0, 0(%2)\n"
  1846. "2:\n"
  1847. ".insn\n"
  1848. ".section .fixup,\"ax\"\n"
  1849. "3:\n"
  1850. "li %1, %3\n"
  1851. "j 2b\n"
  1852. ".previous\n"
  1853. ".section __ex_table,\"a\"\n"
  1854. STR(PTR_WD) " 1b,3b\n"
  1855. ".previous\n"
  1856. : "+&r"(res), "+&r"(err)
  1857. : "r"(vaddr), "i"(SIGSEGV));
  1858. if (MIPSInst_RT(inst) && !err)
  1859. regs->regs[MIPSInst_RT(inst)] = res;
  1860. MIPS_R2_STATS(llsc);
  1861. break;
  1862. case lld_op:
  1863. if (IS_ENABLED(CONFIG_32BIT)) {
  1864. err = SIGILL;
  1865. break;
  1866. }
  1867. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1868. if (vaddr & 0x7) {
  1869. current->thread.cp0_baduaddr = vaddr;
  1870. err = SIGBUS;
  1871. break;
  1872. }
  1873. if (!access_ok((void __user *)vaddr, 8)) {
  1874. current->thread.cp0_baduaddr = vaddr;
  1875. err = SIGBUS;
  1876. break;
  1877. }
  1878. if (!cpu_has_rw_llb) {
  1879. /*
  1880. * An LL/SC block can't be safely emulated without
  1881. * a Config5/LLB availability. So it's probably time to
  1882. * kill our process before things get any worse. This is
  1883. * because Config5/LLB allows us to use ERETNC so that
  1884. * the LLAddr/LLB bit is not cleared when we return from
  1885. * an exception. MIPS R2 LL/SC instructions trap with an
  1886. * RI exception so once we emulate them here, we return
  1887. * back to userland with ERETNC. That preserves the
  1888. * LLAddr/LLB so the subsequent SC instruction will
  1889. * succeed preserving the atomic semantics of the LL/SC
  1890. * block. Without that, there is no safe way to emulate
  1891. * an LL/SC block in MIPSR2 userland.
  1892. */
  1893. pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
  1894. err = SIGKILL;
  1895. break;
  1896. }
  1897. __asm__ __volatile__(
  1898. "1:\n"
  1899. "lld %0, 0(%2)\n"
  1900. "2:\n"
  1901. ".insn\n"
  1902. ".section .fixup,\"ax\"\n"
  1903. "3:\n"
  1904. "li %1, %3\n"
  1905. "j 2b\n"
  1906. ".previous\n"
  1907. ".section __ex_table,\"a\"\n"
  1908. STR(PTR_WD) " 1b,3b\n"
  1909. ".previous\n"
  1910. : "=&r"(res), "+&r"(err)
  1911. : "r"(vaddr), "i"(SIGSEGV)
  1912. : "memory");
  1913. if (MIPSInst_RT(inst) && !err)
  1914. regs->regs[MIPSInst_RT(inst)] = res;
  1915. MIPS_R2_STATS(llsc);
  1916. break;
  1917. case scd_op:
  1918. if (IS_ENABLED(CONFIG_32BIT)) {
  1919. err = SIGILL;
  1920. break;
  1921. }
  1922. vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
  1923. if (vaddr & 0x7) {
  1924. current->thread.cp0_baduaddr = vaddr;
  1925. err = SIGBUS;
  1926. break;
  1927. }
  1928. if (!access_ok((void __user *)vaddr, 8)) {
  1929. current->thread.cp0_baduaddr = vaddr;
  1930. err = SIGBUS;
  1931. break;
  1932. }
  1933. if (!cpu_has_rw_llb) {
  1934. /*
  1935. * An LL/SC block can't be safely emulated without
  1936. * a Config5/LLB availability. So it's probably time to
  1937. * kill our process before things get any worse. This is
  1938. * because Config5/LLB allows us to use ERETNC so that
  1939. * the LLAddr/LLB bit is not cleared when we return from
  1940. * an exception. MIPS R2 LL/SC instructions trap with an
  1941. * RI exception so once we emulate them here, we return
  1942. * back to userland with ERETNC. That preserves the
  1943. * LLAddr/LLB so the subsequent SC instruction will
  1944. * succeed preserving the atomic semantics of the LL/SC
  1945. * block. Without that, there is no safe way to emulate
  1946. * an LL/SC block in MIPSR2 userland.
  1947. */
  1948. pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
  1949. err = SIGKILL;
  1950. break;
  1951. }
  1952. res = regs->regs[MIPSInst_RT(inst)];
  1953. __asm__ __volatile__(
  1954. "1:\n"
  1955. "scd %0, 0(%2)\n"
  1956. "2:\n"
  1957. ".insn\n"
  1958. ".section .fixup,\"ax\"\n"
  1959. "3:\n"
  1960. "li %1, %3\n"
  1961. "j 2b\n"
  1962. ".previous\n"
  1963. ".section __ex_table,\"a\"\n"
  1964. STR(PTR_WD) " 1b,3b\n"
  1965. ".previous\n"
  1966. : "+&r"(res), "+&r"(err)
  1967. : "r"(vaddr), "i"(SIGSEGV));
  1968. if (MIPSInst_RT(inst) && !err)
  1969. regs->regs[MIPSInst_RT(inst)] = res;
  1970. MIPS_R2_STATS(llsc);
  1971. break;
  1972. case pref_op:
  1973. /* skip it */
  1974. break;
  1975. default:
  1976. err = SIGILL;
  1977. }
  1978. /*
  1979. * Let's not return to userland just yet. It's costly and
  1980. * it's likely we have more R2 instructions to emulate
  1981. */
  1982. if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
  1983. regs->cp0_cause &= ~CAUSEF_BD;
  1984. err = get_user(inst, (u32 __user *)regs->cp0_epc);
  1985. if (!err)
  1986. goto repeat;
  1987. if (err < 0)
  1988. err = SIGSEGV;
  1989. }
  1990. if (err && (err != SIGEMT)) {
  1991. regs->regs[31] = r31;
  1992. regs->cp0_epc = epc;
  1993. }
  1994. /* Likely a MIPS R6 compatible instruction */
  1995. if (pass && (err == SIGILL))
  1996. err = 0;
  1997. return err;
  1998. }
  1999. #ifdef CONFIG_DEBUG_FS
  2000. static int mipsr2_emul_show(struct seq_file *s, void *unused)
  2001. {
  2002. seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
  2003. seq_printf(s, "movs\t\t%ld\t%ld\n",
  2004. (unsigned long)__this_cpu_read(mipsr2emustats.movs),
  2005. (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
  2006. seq_printf(s, "hilo\t\t%ld\t%ld\n",
  2007. (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
  2008. (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
  2009. seq_printf(s, "muls\t\t%ld\t%ld\n",
  2010. (unsigned long)__this_cpu_read(mipsr2emustats.muls),
  2011. (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
  2012. seq_printf(s, "divs\t\t%ld\t%ld\n",
  2013. (unsigned long)__this_cpu_read(mipsr2emustats.divs),
  2014. (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
  2015. seq_printf(s, "dsps\t\t%ld\t%ld\n",
  2016. (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
  2017. (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
  2018. seq_printf(s, "bops\t\t%ld\t%ld\n",
  2019. (unsigned long)__this_cpu_read(mipsr2emustats.bops),
  2020. (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
  2021. seq_printf(s, "traps\t\t%ld\t%ld\n",
  2022. (unsigned long)__this_cpu_read(mipsr2emustats.traps),
  2023. (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
  2024. seq_printf(s, "fpus\t\t%ld\t%ld\n",
  2025. (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
  2026. (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
  2027. seq_printf(s, "loads\t\t%ld\t%ld\n",
  2028. (unsigned long)__this_cpu_read(mipsr2emustats.loads),
  2029. (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
  2030. seq_printf(s, "stores\t\t%ld\t%ld\n",
  2031. (unsigned long)__this_cpu_read(mipsr2emustats.stores),
  2032. (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
  2033. seq_printf(s, "llsc\t\t%ld\t%ld\n",
  2034. (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
  2035. (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
  2036. seq_printf(s, "dsemul\t\t%ld\t%ld\n",
  2037. (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
  2038. (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
  2039. seq_printf(s, "jr\t\t%ld\n",
  2040. (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
  2041. seq_printf(s, "bltzl\t\t%ld\n",
  2042. (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
  2043. seq_printf(s, "bgezl\t\t%ld\n",
  2044. (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
  2045. seq_printf(s, "bltzll\t\t%ld\n",
  2046. (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
  2047. seq_printf(s, "bgezll\t\t%ld\n",
  2048. (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
  2049. seq_printf(s, "bltzal\t\t%ld\n",
  2050. (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
  2051. seq_printf(s, "bgezal\t\t%ld\n",
  2052. (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
  2053. seq_printf(s, "beql\t\t%ld\n",
  2054. (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
  2055. seq_printf(s, "bnel\t\t%ld\n",
  2056. (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
  2057. seq_printf(s, "blezl\t\t%ld\n",
  2058. (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
  2059. seq_printf(s, "bgtzl\t\t%ld\n",
  2060. (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
  2061. return 0;
  2062. }
/*
 * mipsr2_clear_show - dump and then reset the per-CPU emulation statistics.
 * @s:      seq_file backing the debugfs "r2_emul_stats_clear" read
 * @unused: unused seq_file private pointer
 *
 * Reading this file first prints the same report as mipsr2_emul_show()
 * and then zeroes every counter on the *current* CPU (__this_cpu_write);
 * counters on other CPUs are untouched.
 *
 * Returns 0 (seq_file show convention).
 */
static int mipsr2_clear_show(struct seq_file *s, void *unused)
{
	/* Emit the current values before clearing them. */
	mipsr2_emul_show(s, unused);

	/* Paired total / branch-delay-slot counters. */
	__this_cpu_write((mipsr2emustats).movs, 0);
	__this_cpu_write((mipsr2bdemustats).movs, 0);
	__this_cpu_write((mipsr2emustats).hilo, 0);
	__this_cpu_write((mipsr2bdemustats).hilo, 0);
	__this_cpu_write((mipsr2emustats).muls, 0);
	__this_cpu_write((mipsr2bdemustats).muls, 0);
	__this_cpu_write((mipsr2emustats).divs, 0);
	__this_cpu_write((mipsr2bdemustats).divs, 0);
	__this_cpu_write((mipsr2emustats).dsps, 0);
	__this_cpu_write((mipsr2bdemustats).dsps, 0);
	__this_cpu_write((mipsr2emustats).bops, 0);
	__this_cpu_write((mipsr2bdemustats).bops, 0);
	__this_cpu_write((mipsr2emustats).traps, 0);
	__this_cpu_write((mipsr2bdemustats).traps, 0);
	__this_cpu_write((mipsr2emustats).fpus, 0);
	__this_cpu_write((mipsr2bdemustats).fpus, 0);
	__this_cpu_write((mipsr2emustats).loads, 0);
	__this_cpu_write((mipsr2bdemustats).loads, 0);
	__this_cpu_write((mipsr2emustats).stores, 0);
	__this_cpu_write((mipsr2bdemustats).stores, 0);
	__this_cpu_write((mipsr2emustats).llsc, 0);
	__this_cpu_write((mipsr2bdemustats).llsc, 0);
	__this_cpu_write((mipsr2emustats).dsemul, 0);
	__this_cpu_write((mipsr2bdemustats).dsemul, 0);

	/* Branch-emulation counters. */
	__this_cpu_write((mipsr2bremustats).jrs, 0);
	__this_cpu_write((mipsr2bremustats).bltzl, 0);
	__this_cpu_write((mipsr2bremustats).bgezl, 0);
	__this_cpu_write((mipsr2bremustats).bltzll, 0);
	__this_cpu_write((mipsr2bremustats).bgezll, 0);
	/*
	 * NOTE(review): bltzall/bgezall are cleared here but are not
	 * printed by mipsr2_emul_show() above — presumably intentional
	 * (counted but not reported); confirm against the mipsr2br_stats
	 * definition elsewhere in this file.
	 */
	__this_cpu_write((mipsr2bremustats).bltzall, 0);
	__this_cpu_write((mipsr2bremustats).bgezall, 0);
	__this_cpu_write((mipsr2bremustats).bltzal, 0);
	__this_cpu_write((mipsr2bremustats).bgezal, 0);
	__this_cpu_write((mipsr2bremustats).beql, 0);
	__this_cpu_write((mipsr2bremustats).bnel, 0);
	__this_cpu_write((mipsr2bremustats).blezl, 0);
	__this_cpu_write((mipsr2bremustats).bgtzl, 0);
	return 0;
}
/*
 * Generate the open/read/release file_operations boilerplate
 * (mipsr2_emul_fops / mipsr2_clear_fops) around the _show functions
 * above, for use by the debugfs files registered below.
 */
DEFINE_SHOW_ATTRIBUTE(mipsr2_emul);
DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);
  2107. static int __init mipsr2_init_debugfs(void)
  2108. {
  2109. debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL,
  2110. &mipsr2_emul_fops);
  2111. debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir,
  2112. NULL, &mipsr2_clear_fops);
  2113. return 0;
  2114. }
  2115. device_initcall(mipsr2_init_debugfs);
  2116. #endif /* CONFIG_DEBUG_FS */