// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <[email protected]>, IBM
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
#include <asm/disassemble.h>

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
#define XER_OV32	0x00080000U
#define XER_CA32	0x00040000U

#ifdef CONFIG_VSX
#define VSX_REGISTER_XTP(rd)	((((rd) & 1) << 5) | ((rd) & 0xfe))
#endif

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern void get_fpr(int rn, double *p);
extern void put_fpr(int rn, const double *p);
extern void get_vr(int rn, __vector128 *p);
extern void put_vr(int rn, __vector128 *p);
extern void load_vsrn(int vsr, const void *p);
extern void store_vsrn(int vsr, void *p);
extern void conv_sp_to_dp(const float *sp, double *dp);
extern void conv_dp_to_sp(const double *dp, float *sp);
#endif

#ifdef __powerpc64__
/*
 * Functions in quad.S
 */
extern int do_lq(unsigned long ea, unsigned long *regs);
extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
extern int do_lqarx(unsigned long ea, unsigned long *regs);
extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
		    unsigned int *crp);
#endif

#ifdef __LITTLE_ENDIAN__
#define IS_LE	1
#define IS_BE	0
#else
#define IS_LE	0
#define IS_BE	1
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
						       unsigned long val)
{
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
	return val;
}

/*
 * Determine whether a conditional branch instruction would branch.
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
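
/*
 * Worked example: bdnz has BO = 0b10000, so (bo & 4) == 0 means CTR is
 * decremented, and with BO bit 1 clear the branch is taken while the
 * decremented CTR is non-zero; (bo & 0x10) != 0 means no CR bit is
 * tested.
 */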

static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (access_ok((void __user *)ea, nb))
		return 1;
	if (access_ok((void __user *)ea, 1))
		/* Access overlaps the end of the user region */
		regs->dar = TASK_SIZE_MAX - 1;
	else
		regs->dar = ea;
	return 0;
}

/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a DQ-form instruction
 */
static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~0xf);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
#endif /* __powerpc64 */

/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a MLS:D-form / 8LS:D-form
 * prefixed instruction
 */
static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
						  unsigned int suffix,
						  const struct pt_regs *regs)
{
	int ra, prefix_r;
	unsigned int dd;
	unsigned long ea, d0, d1, d;

	prefix_r = GET_PREFIX_R(instr);
	ra = GET_PREFIX_RA(suffix);

	d0 = instr & 0x3ffff;
	d1 = suffix & 0xffff;
	d = (d0 << 16) | d1;

	/*
	 * sign extend a 34 bit number
	 */
	dd = (unsigned int)(d >> 2);
	ea = (signed int)dd;
	ea = (ea << 2) | (d & 0x3);

	if (!prefix_r && ra)
		ea += regs->gpr[ra];
	else if (!prefix_r && !ra)
		;	/* Leave ea as is */
	else if (prefix_r)
		ea += regs->nip;

	/*
	 * (prefix_r && ra) is an invalid form. Should already be
	 * checked for by caller!
	 */

	return ea;
}
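
/*
 * Worked example of the 34-bit sign extension above: d = 0x3fffffffc is
 * -4 as a 34-bit value; d >> 2 is 0xffffffff, which sign-extends to -1,
 * and shifting back left by 2 and or-ing in the low two bits of d gives
 * ea = -4 before any base register or NIP is added.
 */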

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
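
/*
 * e.g. max_align(0x1006) == 2 while max_align(0x1008) == 8 on 64-bit
 * (the or-in of sizeof(unsigned long) caps the result), so the copy
 * loops below always step by the widest naturally aligned access.
 */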

static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;

		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
	case 32: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;

		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[3]);
		up[3] = tmp;
		tmp = byterev_8(up[2]);
		up[2] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
}
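
/*
 * The 16- and 32-byte cases above reverse the whole buffer: each
 * doubleword is byte-reversed and the doublewords are swapped
 * end-to-end, which amounts to a full byte reversal of the buffer.
 */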

static __always_inline int
__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	unsigned long x = 0;

	switch (nb) {
	case 1:
		unsafe_get_user(x, (unsigned char __user *)ea, Efault);
		break;
	case 2:
		unsafe_get_user(x, (unsigned short __user *)ea, Efault);
		break;
	case 4:
		unsafe_get_user(x, (unsigned int __user *)ea, Efault);
		break;
#ifdef __powerpc64__
	case 8:
		unsafe_get_user(x, (unsigned long __user *)ea, Efault);
		break;
#endif
	}
	*dest = x;
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int
read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;

	if (is_kernel_addr(ea))
		return __read_mem_aligned(dest, ea, nb, regs);

	if (user_read_access_begin((void __user *)ea, nb)) {
		err = __read_mem_aligned(dest, ea, nb, regs);
		user_read_access_end();
	} else {
		err = -EFAULT;
		regs->dar = ea;
	}

	return err;
}

/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			unsafe_get_user(*dest, (u8 __user *)ea, Efault);
			break;
		case 2:
			unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
			break;
		case 4:
			unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
			break;
#ifdef __powerpc64__
		case 8:
			unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
			break;
#endif
		}
		dest += c;
		ea += c;
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;

	if (is_kernel_addr(ea))
		return __copy_mem_in(dest, ea, nb, regs);

	if (user_read_access_begin((void __user *)ea, nb)) {
		err = __copy_mem_in(dest, ea, nb, regs);
		user_read_access_end();
	} else {
		err = -EFAULT;
		regs->dar = ea;
	}

	return err;
}

static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb,
					      struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;
	int err;

	u.ul = 0;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	err = copy_mem_in(&u.b[i], ea, nb, regs);
	if (!err)
		*dest = u.ul;

	return err;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
 * If nb < sizeof(long), the result is right-justified on BE systems.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
		    struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb, regs);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);

static __always_inline int
__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
	switch (nb) {
	case 1:
		unsafe_put_user(val, (unsigned char __user *)ea, Efault);
		break;
	case 2:
		unsafe_put_user(val, (unsigned short __user *)ea, Efault);
		break;
	case 4:
		unsafe_put_user(val, (unsigned int __user *)ea, Efault);
		break;
#ifdef __powerpc64__
	case 8:
		unsafe_put_user(val, (unsigned long __user *)ea, Efault);
		break;
#endif
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int
write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;

	if (is_kernel_addr(ea))
		return __write_mem_aligned(val, ea, nb, regs);

	if (user_write_access_begin((void __user *)ea, nb)) {
		err = __write_mem_aligned(val, ea, nb, regs);
		user_write_access_end();
	} else {
		err = -EFAULT;
		regs->dar = ea;
	}

	return err;
}

/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static nokprobe_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			unsafe_put_user(*dest, (u8 __user *)ea, Efault);
			break;
		case 2:
			unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
			break;
		case 4:
			unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
			break;
#ifdef __powerpc64__
		case 8:
			unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
			break;
#endif
		}
		dest += c;
		ea += c;
	}
	return 0;

Efault:
	regs->dar = ea;
	return -EFAULT;
}

static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;

	if (is_kernel_addr(ea))
		return __copy_mem_out(dest, ea, nb, regs);

	if (user_write_access_begin((void __user *)ea, nb)) {
		err = __copy_mem_out(dest, ea, nb, regs);
		user_write_access_end();
	} else {
		err = -EFAULT;
		regs->dar = ea;
	}

	return err;
}

static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb,
					       struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;

	u.ul = val;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	return copy_mem_out(&u.b[i], ea, nb, regs);
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
		     struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb, regs);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);

#ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	preempt_disable();
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);

static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	preempt_disable();
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	preempt_disable();
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	preempt_disable();
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
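
/*
 * Note that for the element forms (size < 16), the bytes are placed at
 * offset (ea & 0xf) within the vector image, i.e. at the element's
 * natural position inside the enclosing 16-byte block, matching the
 * lvebx/lvehx/lvewx and stvebx/stvehx/stvewx semantics.
 */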

#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}

static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
#endif /* __powerpc64 */
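
/*
 * In the unaligned lq/stq paths above, which GPR of the even/odd pair
 * holds the doubleword at the lower address depends on the kernel's
 * endianness, hence the IS_LE/IS_BE indexing: the pair always
 * represents one 16-byte quadword in memory order.
 */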

#ifdef CONFIG_VSX
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
		      const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 32:
		/* [p]lxvp[x] */
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, size);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				unsigned long v = *(unsigned long *)(mem + 8);

				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			u32 val = reg->w[IS_LE ? 3 : 0];

			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);

void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
		       void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 32:
		/* [p]stxvp[x] */
		if (size == 0)
			break;
		if (rev) {
			/* reverse 32 bytes */
			union vsx_reg buf32[2];

			buf32[0].d[0] = byterev_8(reg[1].d[1]);
			buf32[0].d[1] = byterev_8(reg[1].d[0]);
			buf32[1].d[0] = byterev_8(reg[0].d[1]);
			buf32[1].d[1] = byterev_8(reg[0].d[0]);
			memcpy(mem, buf32, size);
		} else {
			memcpy(mem, reg, size);
		}
		break;
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stvxb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);

static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	emulate_vsx_load(op, buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
			}
		}
	}
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
			}
		}
	}
	preempt_enable();
	emulate_vsx_store(op, buf, mem, cross_endian);
	return copy_mem_out(mem, ea, size, regs);
}
#endif /* CONFIG_VSX */

static int __emulate_dcbz(unsigned long ea)
{
	unsigned long i;
	unsigned long size = l1_dcache_bytes();

	for (i = 0; i < size; i += sizeof(long))
		unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);

	return 0;

Efault:
	return -EFAULT;
}
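
/*
 * dcbz zeroes a full L1 cache line, so emulate_dcbz() below rounds ea
 * down to a line boundary before the word-at-a-time clearing above.
 */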

int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long size = l1_dcache_bytes();

	ea = truncate_if_32bit(regs->msr, ea);
	ea &= ~(size - 1);
	if (!address_ok(regs, ea, size))
		return -EFAULT;

	if (is_kernel_addr(ea)) {
		err = __emulate_dcbz(ea);
	} else if (user_write_access_begin((void __user *)ea, size)) {
		err = __emulate_dcbz(ea);
		user_write_access_end();
	} else {
		err = -EFAULT;
	}

	if (err)
		regs->dar = ea;

	return err;
}
NOKPROBE_SYMBOL(emulate_dcbz);

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine power8\n"			\
		"1:	" op " %2,0,%3\n"		\
		".machine pop\n"			\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine power8\n"			\
		"1:	"op" %1,0,%2\n"			\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
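
/*
 * The three asm macros above all use the classic extable fixup pattern:
 * label 1 is the instruction that may fault, label 3 (in .fixup) loads
 * -EFAULT into err and branches back to label 2, and EX_TABLE(1b, 3b)
 * records the pair so the page-fault handler can resume at the fixup.
 */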

static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op)
{
	long val = op->val;

	op->type |= SETCC;
	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
	if (val < 0)
		op->ccval |= 0x80000000;
	else if (val > 0)
		op->ccval |= 0x40000000;
	else
		op->ccval |= 0x20000000;
}
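
/*
 * (regs->xer >> 3) & 0x10000000 copies XER[SO] (bit 0x80000000) into
 * the SO position of CR field 0; exactly one of LT/GT/EQ is then set
 * from the sign of the result.
 */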

static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (val)
			op->xerval |= XER_CA32;
		else
			op->xerval &= ~XER_CA32;
	}
}

static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
					   struct instruction_op *op, int rd,
					   unsigned long val1, unsigned long val2,
					   unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE | SETREG | SETXER;
	op->reg = rd;
	op->val = val;
	val = truncate_if_32bit(regs->msr, val);
	val1 = truncate_if_32bit(regs->msr, val1);
	op->xerval = regs->xer;
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;

	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
			(carry_in && (unsigned int)val == (unsigned int)val1));
}
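
/*
 * Carry out of the addition is detected by comparing the result with
 * one addend: unsigned val < val1 means the sum wrapped, and with a
 * carry in, val == val1 also indicates wraparound. The same test on
 * the low 32 bits yields CA32.
 */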

static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
					  struct instruction_op *op,
					  long v1, long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE | SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
					    struct instruction_op *op,
					    unsigned long v1,
					    unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE | SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
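
/*
 * The two compare helpers above differ only in the signedness of the
 * comparison. The 4-bit field value is LT=8, GT=4, EQ=2, with XER[SO]
 * in the low bit, shifted into CR field crfld.
 */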

static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v1, unsigned long v2)
{
	unsigned long long out_val, mask;
	int i;

	out_val = 0;
	for (i = 0; i < 8; i++) {
		mask = 0xffUL << (i * 8);
		if ((v1 & mask) == (v2 & mask))
			out_val |= mask;
	}
	op->val = out_val;
}

/*
 * The size parameter is used to adjust the equivalent popcnt instruction.
 * popcntb = 8, popcntw = 32, popcntd = 64
 */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
{
	unsigned long long out = v1;

	out -= (out >> 1) & 0x5555555555555555ULL;
	out = (0x3333333333333333ULL & out) +
	      (0x3333333333333333ULL & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;

	if (size == 8) {	/* popcntb */
		op->val = out;
		return;
	}
	out += out >> 8;
	out += out >> 16;
	if (size == 32) {	/* popcntw */
		op->val = out & 0x0000003f0000003fULL;
		return;
	}

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
}
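
/*
 * This is the standard SWAR popcount: the three masked steps sum
 * adjacent 1-, 2- and 4-bit fields in parallel to get per-byte counts
 * (popcntb), and the later shifts fold those into per-word (popcntw)
 * and per-doubleword (popcntd) totals.
 */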

#ifdef CONFIG_PPC64
static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, unsigned long v2)
{
	unsigned char perm, idx;
	unsigned int i;

	perm = 0;
	for (i = 0; i < 8; i++) {
		idx = (v1 >> (i * 8)) & 0xff;
		if (idx < 64)
			if (v2 & PPC_BIT(idx))
				perm |= 1 << i;
	}
	op->val = perm;
}
#endif /* CONFIG_PPC64 */

/*
 * The size parameter adjusts the equivalent prty instruction.
 * prtyw = 32, prtyd = 64
 */
static nokprobe_inline void do_prty(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v, int size)
{
	unsigned long long res = v ^ (v >> 8);

	res ^= res >> 16;
	if (size == 32) {	/* prtyw */
		op->val = res & 0x0000000100000001ULL;
		return;
	}

	res ^= res >> 32;
	op->val = res & 1;	/* prtyd */
}

static nokprobe_inline int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
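
/*
 * The return value mirrors the TO field encoding of tw/td and friends:
 * 0x10 = signed less than, 0x08 = signed greater than, 0x04 = equal,
 * 0x02 = unsigned less than, 0x01 = unsigned greater than. A trap is
 * taken when (TO & trap_compare(...)) is non-zero.
 */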

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)		((0xffffffffUL >> (mb)) + \
				 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)		(~0UL >> (mb))
#define MASK64_R(me)		((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)		(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)		(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
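
/*
 * MASK32(mb, me) yields the rlw* mask with 1s from bit mb through bit
 * me in IBM (MSB = bit 0) numbering: for mb <= me the two shifted terms
 * plus the (me >= mb) correction sum to exactly that run of 1s, and for
 * me < mb the plain sum gives the wrapped mask. MASK64 works the same
 * way on 64-bit masks.
 */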
  1201. /*
  1202. * Decode an instruction, and return information about it in *op
  1203. * without changing *regs.
  1204. * Integer arithmetic and logical instructions, branches, and barrier
  1205. * instructions can be emulated just using the information in *op.
  1206. *
  1207. * Return value is 1 if the instruction can be emulated just by
  1208. * updating *regs with the information in *op, -1 if we need the
  1209. * GPRs but *regs doesn't contain the full register set, or 0
  1210. * otherwise.
  1211. */
  1212. int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
  1213. ppc_inst_t instr)
  1214. {
  1215. #ifdef CONFIG_PPC64
  1216. unsigned int suffixopcode, prefixtype, prefix_r;
  1217. #endif
  1218. unsigned int opcode, ra, rb, rc, rd, spr, u;
  1219. unsigned long int imm;
  1220. unsigned long int val, val2;
  1221. unsigned int mb, me, sh;
  1222. unsigned int word, suffix;
  1223. long ival;
  1224. word = ppc_inst_val(instr);
  1225. suffix = ppc_inst_suffix(instr);
  1226. op->type = COMPUTE;
  1227. opcode = ppc_inst_primary_opcode(instr);
  1228. switch (opcode) {
  1229. case 16: /* bc */
  1230. op->type = BRANCH;
  1231. imm = (signed short)(word & 0xfffc);
  1232. if ((word & 2) == 0)
  1233. imm += regs->nip;
  1234. op->val = truncate_if_32bit(regs->msr, imm);
  1235. if (word & 1)
  1236. op->type |= SETLK;
  1237. if (branch_taken(word, regs, op))
  1238. op->type |= BRTAKEN;
  1239. return 1;
  1240. case 17: /* sc */
  1241. if ((word & 0xfe2) == 2)
  1242. op->type = SYSCALL;
  1243. else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
  1244. (word & 0xfe3) == 1) { /* scv */
  1245. op->type = SYSCALL_VECTORED_0;
  1246. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1247. goto unknown_opcode;
  1248. } else
  1249. op->type = UNKNOWN;
  1250. return 0;
  1251. case 18: /* b */
  1252. op->type = BRANCH | BRTAKEN;
  1253. imm = word & 0x03fffffc;
  1254. if (imm & 0x02000000)
  1255. imm -= 0x04000000;
  1256. if ((word & 2) == 0)
  1257. imm += regs->nip;
  1258. op->val = truncate_if_32bit(regs->msr, imm);
  1259. if (word & 1)
  1260. op->type |= SETLK;
  1261. return 1;
  1262. case 19:
  1263. switch ((word >> 1) & 0x3ff) {
  1264. case 0: /* mcrf */
  1265. op->type = COMPUTE + SETCC;
  1266. rd = 7 - ((word >> 23) & 0x7);
  1267. ra = 7 - ((word >> 18) & 0x7);
  1268. rd *= 4;
  1269. ra *= 4;
  1270. val = (regs->ccr >> ra) & 0xf;
  1271. op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
  1272. return 1;
  1273. case 16: /* bclr */
  1274. case 528: /* bcctr */
  1275. op->type = BRANCH;
  1276. imm = (word & 0x400)? regs->ctr: regs->link;
  1277. op->val = truncate_if_32bit(regs->msr, imm);
  1278. if (word & 1)
  1279. op->type |= SETLK;
  1280. if (branch_taken(word, regs, op))
  1281. op->type |= BRTAKEN;
  1282. return 1;
  1283. case 18: /* rfid, scary */
  1284. if (regs->msr & MSR_PR)
  1285. goto priv;
  1286. op->type = RFI;
  1287. return 0;
  1288. case 150: /* isync */
  1289. op->type = BARRIER | BARRIER_ISYNC;
  1290. return 1;
  1291. case 33: /* crnor */
  1292. case 129: /* crandc */
  1293. case 193: /* crxor */
  1294. case 225: /* crnand */
  1295. case 257: /* crand */
  1296. case 289: /* creqv */
  1297. case 417: /* crorc */
  1298. case 449: /* cror */
  1299. op->type = COMPUTE + SETCC;
  1300. ra = (word >> 16) & 0x1f;
  1301. rb = (word >> 11) & 0x1f;
  1302. rd = (word >> 21) & 0x1f;
  1303. ra = (regs->ccr >> (31 - ra)) & 1;
  1304. rb = (regs->ccr >> (31 - rb)) & 1;
  1305. val = (word >> (6 + ra * 2 + rb)) & 1;
  1306. op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
  1307. (val << (31 - rd));
  1308. return 1;
  1309. }
  1310. break;
  1311. case 31:
  1312. switch ((word >> 1) & 0x3ff) {
  1313. case 598: /* sync */
  1314. op->type = BARRIER + BARRIER_SYNC;
  1315. #ifdef __powerpc64__
  1316. switch ((word >> 21) & 3) {
  1317. case 1: /* lwsync */
  1318. op->type = BARRIER + BARRIER_LWSYNC;
  1319. break;
  1320. case 2: /* ptesync */
  1321. op->type = BARRIER + BARRIER_PTESYNC;
  1322. break;
  1323. }
  1324. #endif
  1325. return 1;
  1326. case 854: /* eieio */
  1327. op->type = BARRIER + BARRIER_EIEIO;
  1328. return 1;
  1329. }
  1330. break;
  1331. }
  1332. rd = (word >> 21) & 0x1f;
  1333. ra = (word >> 16) & 0x1f;
  1334. rb = (word >> 11) & 0x1f;
  1335. rc = (word >> 6) & 0x1f;
  1336. switch (opcode) {
  1337. #ifdef __powerpc64__
  1338. case 1:
  1339. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  1340. goto unknown_opcode;
  1341. prefix_r = GET_PREFIX_R(word);
  1342. ra = GET_PREFIX_RA(suffix);
  1343. rd = (suffix >> 21) & 0x1f;
  1344. op->reg = rd;
  1345. op->val = regs->gpr[rd];
  1346. suffixopcode = get_op(suffix);
  1347. prefixtype = (word >> 24) & 0x3;
  1348. switch (prefixtype) {
  1349. case 2:
  1350. if (prefix_r && ra)
  1351. return 0;
  1352. switch (suffixopcode) {
  1353. case 14: /* paddi */
  1354. op->type = COMPUTE | PREFIXED;
  1355. op->val = mlsd_8lsd_ea(word, suffix, regs);
  1356. goto compute_done;
  1357. }
  1358. }
  1359. break;
  1360. case 2: /* tdi */
  1361. if (rd & trap_compare(regs->gpr[ra], (short) word))
  1362. goto trap;
  1363. return 1;
  1364. #endif
  1365. case 3: /* twi */
  1366. if (rd & trap_compare((int)regs->gpr[ra], (short) word))
  1367. goto trap;
  1368. return 1;
  1369. #ifdef __powerpc64__
  1370. case 4:
  1371. /*
  1372. * There are very many instructions with this primary opcode
  1373. * introduced in the ISA as early as v2.03. However, the ones
  1374. * we currently emulate were all introduced with ISA 3.0
  1375. */
  1376. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1377. goto unknown_opcode;
  1378. switch (word & 0x3f) {
  1379. case 48: /* maddhd */
  1380. asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
  1381. "=r" (op->val) : "r" (regs->gpr[ra]),
  1382. "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
  1383. goto compute_done;
  1384. case 49: /* maddhdu */
  1385. asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
  1386. "=r" (op->val) : "r" (regs->gpr[ra]),
  1387. "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
  1388. goto compute_done;
  1389. case 51: /* maddld */
  1390. asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
  1391. "=r" (op->val) : "r" (regs->gpr[ra]),
  1392. "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
  1393. goto compute_done;
  1394. }
  1395. /*
  1396. * There are other instructions from ISA 3.0 with the same
  1397. * primary opcode which do not have emulation support yet.
  1398. */
  1399. goto unknown_opcode;
  1400. #endif
  1401. case 7: /* mulli */
  1402. op->val = regs->gpr[ra] * (short) word;
  1403. goto compute_done;
  1404. case 8: /* subfic */
  1405. imm = (short) word;
  1406. add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
  1407. return 1;
  1408. case 10: /* cmpli */
  1409. imm = (unsigned short) word;
  1410. val = regs->gpr[ra];
  1411. #ifdef __powerpc64__
  1412. if ((rd & 1) == 0)
  1413. val = (unsigned int) val;
  1414. #endif
  1415. do_cmp_unsigned(regs, op, val, imm, rd >> 2);
  1416. return 1;
  1417. case 11: /* cmpi */
  1418. imm = (short) word;
  1419. val = regs->gpr[ra];
  1420. #ifdef __powerpc64__
  1421. if ((rd & 1) == 0)
  1422. val = (int) val;
  1423. #endif
  1424. do_cmp_signed(regs, op, val, imm, rd >> 2);
  1425. return 1;
  1426. case 12: /* addic */
  1427. imm = (short) word;
  1428. add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
  1429. return 1;
  1430. case 13: /* addic. */
  1431. imm = (short) word;
  1432. add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
  1433. set_cr0(regs, op);
  1434. return 1;
  1435. case 14: /* addi */
  1436. imm = (short) word;
  1437. if (ra)
  1438. imm += regs->gpr[ra];
  1439. op->val = imm;
  1440. goto compute_done;
  1441. case 15: /* addis */
  1442. imm = ((short) word) << 16;
  1443. if (ra)
  1444. imm += regs->gpr[ra];
  1445. op->val = imm;
  1446. goto compute_done;
  1447. case 19:
  1448. if (((word >> 1) & 0x1f) == 2) {
  1449. /* addpcis */
  1450. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1451. goto unknown_opcode;
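/*
* addpcis rd,D computes rd = NIP + (D << 16); the 16-bit D is
* scattered across the d0, d1 and d2 fields and is reassembled
* below, and the +4 accounts for NIP being the address of the
* next instruction.
*/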
  1452. imm = (short) (word & 0xffc1); /* d0 + d2 fields */
  1453. imm |= (word >> 15) & 0x3e; /* d1 field */
  1454. op->val = regs->nip + (imm << 16) + 4;
  1455. goto compute_done;
  1456. }
  1457. op->type = UNKNOWN;
  1458. return 0;
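/*
* rlw*: rotate the low 32 bits of rs left and combine the result
* under MASK32(mb, me); rlwimi also keeps the bits of ra outside
* the mask, while rlwnm takes its rotate count from GPR(rb).
*/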
  1459. case 20: /* rlwimi */
  1460. mb = (word >> 6) & 0x1f;
  1461. me = (word >> 1) & 0x1f;
  1462. val = DATA32(regs->gpr[rd]);
  1463. imm = MASK32(mb, me);
  1464. op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
  1465. goto logical_done;
  1466. case 21: /* rlwinm */
  1467. mb = (word >> 6) & 0x1f;
  1468. me = (word >> 1) & 0x1f;
  1469. val = DATA32(regs->gpr[rd]);
  1470. op->val = ROTATE(val, rb) & MASK32(mb, me);
  1471. goto logical_done;
  1472. case 23: /* rlwnm */
  1473. mb = (word >> 6) & 0x1f;
  1474. me = (word >> 1) & 0x1f;
  1475. rb = regs->gpr[rb] & 0x1f;
  1476. val = DATA32(regs->gpr[rd]);
  1477. op->val = ROTATE(val, rb) & MASK32(mb, me);
  1478. goto logical_done;
  1479. case 24: /* ori */
  1480. op->val = regs->gpr[rd] | (unsigned short) word;
  1481. goto logical_done_nocc;
  1482. case 25: /* oris */
  1483. imm = (unsigned short) word;
  1484. op->val = regs->gpr[rd] | (imm << 16);
  1485. goto logical_done_nocc;
  1486. case 26: /* xori */
  1487. op->val = regs->gpr[rd] ^ (unsigned short) word;
  1488. goto logical_done_nocc;
  1489. case 27: /* xoris */
  1490. imm = (unsigned short) word;
  1491. op->val = regs->gpr[rd] ^ (imm << 16);
  1492. goto logical_done_nocc;
  1493. case 28: /* andi. */
  1494. op->val = regs->gpr[rd] & (unsigned short) word;
  1495. set_cr0(regs, op);
  1496. goto logical_done_nocc;
  1497. case 29: /* andis. */
  1498. imm = (unsigned short) word;
  1499. op->val = regs->gpr[rd] & (imm << 16);
  1500. set_cr0(regs, op);
  1501. goto logical_done_nocc;
  1502. #ifdef __powerpc64__
  1503. case 30: /* rld* */
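/*
* All rld* forms share a 6-bit mb/me field whose top bit sits at
* word bit 5.  Word bit 4 distinguishes the immediate-shift forms
* (high shift bit at word bit 1) from rldcl/rldcr, which take the
* shift count from GPR(rb).
*/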
  1504. mb = ((word >> 6) & 0x1f) | (word & 0x20);
  1505. val = regs->gpr[rd];
  1506. if ((word & 0x10) == 0) {
  1507. sh = rb | ((word & 2) << 4);
  1508. val = ROTATE(val, sh);
  1509. switch ((word >> 2) & 3) {
  1510. case 0: /* rldicl */
  1511. val &= MASK64_L(mb);
  1512. break;
  1513. case 1: /* rldicr */
  1514. val &= MASK64_R(mb);
  1515. break;
  1516. case 2: /* rldic */
  1517. val &= MASK64(mb, 63 - sh);
  1518. break;
  1519. case 3: /* rldimi */
  1520. imm = MASK64(mb, 63 - sh);
  1521. val = (regs->gpr[ra] & ~imm) |
  1522. (val & imm);
  1523. }
  1524. op->val = val;
  1525. goto logical_done;
  1526. } else {
  1527. sh = regs->gpr[rb] & 0x3f;
  1528. val = ROTATE(val, sh);
  1529. switch ((word >> 1) & 7) {
  1530. case 0: /* rldcl */
  1531. op->val = val & MASK64_L(mb);
  1532. goto logical_done;
  1533. case 1: /* rldcr */
  1534. op->val = val & MASK64_R(mb);
  1535. goto logical_done;
  1536. }
  1537. }
  1538. #endif
  1539. op->type = UNKNOWN; /* illegal instruction */
  1540. return 0;
  1541. case 31:
  1542. /* isel occupies 32 minor opcodes */
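/* isel rd,ra,rb,bc: rd = CR[bc] ? (ra ? GPR(ra) : 0) : GPR(rb) */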
  1543. if (((word >> 1) & 0x1f) == 15) {
  1544. mb = (word >> 6) & 0x1f; /* bc field */
  1545. val = (regs->ccr >> (31 - mb)) & 1;
  1546. val2 = (ra) ? regs->gpr[ra] : 0;
  1547. op->val = (val) ? val2 : regs->gpr[rb];
  1548. goto compute_done;
  1549. }
  1550. switch ((word >> 1) & 0x3ff) {
  1551. case 4: /* tw */
  1552. if (rd == 0x1f ||
  1553. (rd & trap_compare((int)regs->gpr[ra],
  1554. (int)regs->gpr[rb])))
  1555. goto trap;
  1556. return 1;
  1557. #ifdef __powerpc64__
  1558. case 68: /* td */
  1559. if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
  1560. goto trap;
  1561. return 1;
  1562. #endif
  1563. case 83: /* mfmsr */
  1564. if (regs->msr & MSR_PR)
  1565. goto priv;
  1566. op->type = MFMSR;
  1567. op->reg = rd;
  1568. return 0;
  1569. case 146: /* mtmsr */
  1570. if (regs->msr & MSR_PR)
  1571. goto priv;
  1572. op->type = MTMSR;
  1573. op->reg = rd;
  1574. op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
  1575. return 0;
  1576. #ifdef CONFIG_PPC64
  1577. case 178: /* mtmsrd */
  1578. if (regs->msr & MSR_PR)
  1579. goto priv;
  1580. op->type = MTMSR;
  1581. op->reg = rd;
  1582. /* only MSR_EE and MSR_RI get changed if bit 15 set */
  1583. /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
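/* 0x8002 is MSR_EE | MSR_RI; the L=0 mask clears only HV, ME and LE */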
  1584. imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
  1585. op->val = imm;
  1586. return 0;
  1587. #endif
  1588. case 19: /* mfcr */
  1589. imm = 0xffffffffUL;
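/*
* mfocrf (word bit 20 set) reads a single CR field selected by a
* one-hot FXM mask: scan the eight mask bits to locate it and
* narrow 'imm' to that field's nibble.
*/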
  1590. if ((word >> 20) & 1) {
  1591. imm = 0xf0000000UL;
  1592. for (sh = 0; sh < 8; ++sh) {
  1593. if (word & (0x80000 >> sh))
  1594. break;
  1595. imm >>= 4;
  1596. }
  1597. }
  1598. op->val = regs->ccr & imm;
  1599. goto compute_done;
  1600. case 128: /* setb */
  1601. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1602. goto unknown_opcode;
1603. /*
1604. * 'ra' encodes the CR field number (bfa) in its top 3 bits.
1605. * Since each CR field is 4 bits, masking off the bottom two
1606. * bits of 'ra' yields bfa * 4, the position of the first
1607. * bit of the CR field within the CR.
1608. */
  1609. ra = ra & ~0x3;
  1610. /* 'val' stores bits of the CR field (bfa) */
  1611. val = regs->ccr >> (CR0_SHIFT - ra);
  1612. /* checks if the LT bit of CR field (bfa) is set */
  1613. if (val & 8)
  1614. op->val = -1;
  1615. /* checks if the GT bit of CR field (bfa) is set */
  1616. else if (val & 4)
  1617. op->val = 1;
  1618. else
  1619. op->val = 0;
  1620. goto compute_done;
  1621. case 144: /* mtcrf */
  1622. op->type = COMPUTE + SETCC;
  1623. imm = 0xf0000000UL;
  1624. val = regs->gpr[rd];
  1625. op->ccval = regs->ccr;
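/*
* Each set bit sh of the FXM field replaces CR field sh with the
* matching nibble of rs, leaving the other CR fields untouched.
*/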
  1626. for (sh = 0; sh < 8; ++sh) {
  1627. if (word & (0x80000 >> sh))
  1628. op->ccval = (op->ccval & ~imm) |
  1629. (val & imm);
  1630. imm >>= 4;
  1631. }
  1632. return 1;
  1633. case 339: /* mfspr */
  1634. spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
  1635. op->type = MFSPR;
  1636. op->reg = rd;
  1637. op->spr = spr;
  1638. if (spr == SPRN_XER || spr == SPRN_LR ||
  1639. spr == SPRN_CTR)
  1640. return 1;
  1641. return 0;
  1642. case 467: /* mtspr */
  1643. spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
  1644. op->type = MTSPR;
  1645. op->val = regs->gpr[rd];
  1646. op->spr = spr;
  1647. if (spr == SPRN_XER || spr == SPRN_LR ||
  1648. spr == SPRN_CTR)
  1649. return 1;
  1650. return 0;
  1651. /*
  1652. * Compare instructions
  1653. */
  1654. case 0: /* cmp */
  1655. val = regs->gpr[ra];
  1656. val2 = regs->gpr[rb];
  1657. #ifdef __powerpc64__
  1658. if ((rd & 1) == 0) {
  1659. /* word (32-bit) compare */
  1660. val = (int) val;
  1661. val2 = (int) val2;
  1662. }
  1663. #endif
  1664. do_cmp_signed(regs, op, val, val2, rd >> 2);
  1665. return 1;
  1666. case 32: /* cmpl */
  1667. val = regs->gpr[ra];
  1668. val2 = regs->gpr[rb];
  1669. #ifdef __powerpc64__
  1670. if ((rd & 1) == 0) {
  1671. /* word (32-bit) compare */
  1672. val = (unsigned int) val;
  1673. val2 = (unsigned int) val2;
  1674. }
  1675. #endif
  1676. do_cmp_unsigned(regs, op, val, val2, rd >> 2);
  1677. return 1;
  1678. case 508: /* cmpb */
  1679. do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
  1680. goto logical_done_nocc;
  1681. /*
  1682. * Arithmetic instructions
  1683. */
  1684. case 8: /* subfc */
  1685. add_with_carry(regs, op, rd, ~regs->gpr[ra],
  1686. regs->gpr[rb], 1);
  1687. goto arith_done;
  1688. #ifdef __powerpc64__
  1689. case 9: /* mulhdu */
  1690. asm("mulhdu %0,%1,%2" : "=r" (op->val) :
  1691. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  1692. goto arith_done;
  1693. #endif
  1694. case 10: /* addc */
  1695. add_with_carry(regs, op, rd, regs->gpr[ra],
  1696. regs->gpr[rb], 0);
  1697. goto arith_done;
  1698. case 11: /* mulhwu */
  1699. asm("mulhwu %0,%1,%2" : "=r" (op->val) :
  1700. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  1701. goto arith_done;
  1702. case 40: /* subf */
  1703. op->val = regs->gpr[rb] - regs->gpr[ra];
  1704. goto arith_done;
  1705. #ifdef __powerpc64__
  1706. case 73: /* mulhd */
  1707. asm("mulhd %0,%1,%2" : "=r" (op->val) :
  1708. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  1709. goto arith_done;
  1710. #endif
  1711. case 75: /* mulhw */
  1712. asm("mulhw %0,%1,%2" : "=r" (op->val) :
  1713. "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
  1714. goto arith_done;
  1715. case 104: /* neg */
  1716. op->val = -regs->gpr[ra];
  1717. goto arith_done;
  1718. case 136: /* subfe */
  1719. add_with_carry(regs, op, rd, ~regs->gpr[ra],
  1720. regs->gpr[rb], regs->xer & XER_CA);
  1721. goto arith_done;
  1722. case 138: /* adde */
  1723. add_with_carry(regs, op, rd, regs->gpr[ra],
  1724. regs->gpr[rb], regs->xer & XER_CA);
  1725. goto arith_done;
  1726. case 200: /* subfze */
  1727. add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
  1728. regs->xer & XER_CA);
  1729. goto arith_done;
  1730. case 202: /* addze */
  1731. add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
  1732. regs->xer & XER_CA);
  1733. goto arith_done;
  1734. case 232: /* subfme */
  1735. add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
  1736. regs->xer & XER_CA);
  1737. goto arith_done;
  1738. #ifdef __powerpc64__
  1739. case 233: /* mulld */
  1740. op->val = regs->gpr[ra] * regs->gpr[rb];
  1741. goto arith_done;
  1742. #endif
  1743. case 234: /* addme */
  1744. add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
  1745. regs->xer & XER_CA);
  1746. goto arith_done;
  1747. case 235: /* mullw */
  1748. op->val = (long)(int) regs->gpr[ra] *
  1749. (int) regs->gpr[rb];
  1750. goto arith_done;
  1751. #ifdef __powerpc64__
  1752. case 265: /* modud */
  1753. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1754. goto unknown_opcode;
  1755. op->val = regs->gpr[ra] % regs->gpr[rb];
  1756. goto compute_done;
  1757. #endif
  1758. case 266: /* add */
  1759. op->val = regs->gpr[ra] + regs->gpr[rb];
  1760. goto arith_done;
  1761. case 267: /* moduw */
  1762. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1763. goto unknown_opcode;
  1764. op->val = (unsigned int) regs->gpr[ra] %
  1765. (unsigned int) regs->gpr[rb];
  1766. goto compute_done;
  1767. #ifdef __powerpc64__
  1768. case 457: /* divdu */
  1769. op->val = regs->gpr[ra] / regs->gpr[rb];
  1770. goto arith_done;
  1771. #endif
  1772. case 459: /* divwu */
  1773. op->val = (unsigned int) regs->gpr[ra] /
  1774. (unsigned int) regs->gpr[rb];
  1775. goto arith_done;
  1776. #ifdef __powerpc64__
  1777. case 489: /* divd */
  1778. op->val = (long int) regs->gpr[ra] /
  1779. (long int) regs->gpr[rb];
  1780. goto arith_done;
  1781. #endif
  1782. case 491: /* divw */
  1783. op->val = (int) regs->gpr[ra] /
  1784. (int) regs->gpr[rb];
  1785. goto arith_done;
  1786. #ifdef __powerpc64__
  1787. case 425: /* divde[.] */
  1788. asm volatile(PPC_DIVDE(%0, %1, %2) :
  1789. "=r" (op->val) : "r" (regs->gpr[ra]),
  1790. "r" (regs->gpr[rb]));
  1791. goto arith_done;
  1792. case 393: /* divdeu[.] */
  1793. asm volatile(PPC_DIVDEU(%0, %1, %2) :
  1794. "=r" (op->val) : "r" (regs->gpr[ra]),
  1795. "r" (regs->gpr[rb]));
  1796. goto arith_done;
  1797. #endif
  1798. case 755: /* darn */
  1799. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1800. goto unknown_opcode;
  1801. switch (ra & 0x3) {
  1802. case 0:
  1803. /* 32-bit conditioned */
  1804. asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
  1805. goto compute_done;
  1806. case 1:
  1807. /* 64-bit conditioned */
  1808. asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
  1809. goto compute_done;
  1810. case 2:
  1811. /* 64-bit raw */
  1812. asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
  1813. goto compute_done;
  1814. }
  1815. goto unknown_opcode;
  1816. #ifdef __powerpc64__
  1817. case 777: /* modsd */
  1818. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1819. goto unknown_opcode;
  1820. op->val = (long int) regs->gpr[ra] %
  1821. (long int) regs->gpr[rb];
  1822. goto compute_done;
  1823. #endif
  1824. case 779: /* modsw */
  1825. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1826. goto unknown_opcode;
  1827. op->val = (int) regs->gpr[ra] %
  1828. (int) regs->gpr[rb];
  1829. goto compute_done;
  1830. /*
  1831. * Logical instructions
  1832. */
  1833. case 26: /* cntlzw */
  1834. val = (unsigned int) regs->gpr[rd];
  1835. op->val = ( val ? __builtin_clz(val) : 32 );
  1836. goto logical_done;
  1837. #ifdef __powerpc64__
  1838. case 58: /* cntlzd */
  1839. val = regs->gpr[rd];
  1840. op->val = ( val ? __builtin_clzl(val) : 64 );
  1841. goto logical_done;
  1842. #endif
  1843. case 28: /* and */
  1844. op->val = regs->gpr[rd] & regs->gpr[rb];
  1845. goto logical_done;
  1846. case 60: /* andc */
  1847. op->val = regs->gpr[rd] & ~regs->gpr[rb];
  1848. goto logical_done;
  1849. case 122: /* popcntb */
  1850. do_popcnt(regs, op, regs->gpr[rd], 8);
  1851. goto logical_done_nocc;
  1852. case 124: /* nor */
  1853. op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
  1854. goto logical_done;
  1855. case 154: /* prtyw */
  1856. do_prty(regs, op, regs->gpr[rd], 32);
  1857. goto logical_done_nocc;
  1858. case 186: /* prtyd */
  1859. do_prty(regs, op, regs->gpr[rd], 64);
  1860. goto logical_done_nocc;
  1861. #ifdef CONFIG_PPC64
  1862. case 252: /* bpermd */
  1863. do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
  1864. goto logical_done_nocc;
  1865. #endif
1866. case 284: /* eqv */
  1867. op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
  1868. goto logical_done;
  1869. case 316: /* xor */
  1870. op->val = regs->gpr[rd] ^ regs->gpr[rb];
  1871. goto logical_done;
  1872. case 378: /* popcntw */
  1873. do_popcnt(regs, op, regs->gpr[rd], 32);
  1874. goto logical_done_nocc;
  1875. case 412: /* orc */
  1876. op->val = regs->gpr[rd] | ~regs->gpr[rb];
  1877. goto logical_done;
  1878. case 444: /* or */
  1879. op->val = regs->gpr[rd] | regs->gpr[rb];
  1880. goto logical_done;
  1881. case 476: /* nand */
  1882. op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
  1883. goto logical_done;
  1884. #ifdef CONFIG_PPC64
  1885. case 506: /* popcntd */
  1886. do_popcnt(regs, op, regs->gpr[rd], 64);
  1887. goto logical_done_nocc;
  1888. #endif
  1889. case 538: /* cnttzw */
  1890. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1891. goto unknown_opcode;
  1892. val = (unsigned int) regs->gpr[rd];
  1893. op->val = (val ? __builtin_ctz(val) : 32);
  1894. goto logical_done;
  1895. #ifdef __powerpc64__
  1896. case 570: /* cnttzd */
  1897. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1898. goto unknown_opcode;
  1899. val = regs->gpr[rd];
  1900. op->val = (val ? __builtin_ctzl(val) : 64);
  1901. goto logical_done;
  1902. #endif
  1903. case 922: /* extsh */
  1904. op->val = (signed short) regs->gpr[rd];
  1905. goto logical_done;
  1906. case 954: /* extsb */
  1907. op->val = (signed char) regs->gpr[rd];
  1908. goto logical_done;
  1909. #ifdef __powerpc64__
  1910. case 986: /* extsw */
  1911. op->val = (signed int) regs->gpr[rd];
  1912. goto logical_done;
  1913. #endif
  1914. /*
  1915. * Shift instructions
  1916. */
  1917. case 24: /* slw */
  1918. sh = regs->gpr[rb] & 0x3f;
  1919. if (sh < 32)
  1920. op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
  1921. else
  1922. op->val = 0;
  1923. goto logical_done;
  1924. case 536: /* srw */
  1925. sh = regs->gpr[rb] & 0x3f;
  1926. if (sh < 32)
  1927. op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
  1928. else
  1929. op->val = 0;
  1930. goto logical_done;
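/*
* For the algebraic shifts below, XER[CA] is set only when the
* source is negative and 1-bits are shifted out, which lets a
* following addze round a power-of-2 signed divide toward zero.
*/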
  1931. case 792: /* sraw */
  1932. op->type = COMPUTE + SETREG + SETXER;
  1933. sh = regs->gpr[rb] & 0x3f;
  1934. ival = (signed int) regs->gpr[rd];
  1935. op->val = ival >> (sh < 32 ? sh : 31);
  1936. op->xerval = regs->xer;
  1937. if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
  1938. op->xerval |= XER_CA;
  1939. else
  1940. op->xerval &= ~XER_CA;
  1941. set_ca32(op, op->xerval & XER_CA);
  1942. goto logical_done;
  1943. case 824: /* srawi */
  1944. op->type = COMPUTE + SETREG + SETXER;
  1945. sh = rb;
  1946. ival = (signed int) regs->gpr[rd];
  1947. op->val = ival >> sh;
  1948. op->xerval = regs->xer;
  1949. if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
  1950. op->xerval |= XER_CA;
  1951. else
  1952. op->xerval &= ~XER_CA;
  1953. set_ca32(op, op->xerval & XER_CA);
  1954. goto logical_done;
  1955. #ifdef __powerpc64__
  1956. case 27: /* sld */
  1957. sh = regs->gpr[rb] & 0x7f;
  1958. if (sh < 64)
  1959. op->val = regs->gpr[rd] << sh;
  1960. else
  1961. op->val = 0;
  1962. goto logical_done;
  1963. case 539: /* srd */
  1964. sh = regs->gpr[rb] & 0x7f;
  1965. if (sh < 64)
  1966. op->val = regs->gpr[rd] >> sh;
  1967. else
  1968. op->val = 0;
  1969. goto logical_done;
  1970. case 794: /* srad */
  1971. op->type = COMPUTE + SETREG + SETXER;
  1972. sh = regs->gpr[rb] & 0x7f;
  1973. ival = (signed long int) regs->gpr[rd];
  1974. op->val = ival >> (sh < 64 ? sh : 63);
  1975. op->xerval = regs->xer;
  1976. if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
  1977. op->xerval |= XER_CA;
  1978. else
  1979. op->xerval &= ~XER_CA;
  1980. set_ca32(op, op->xerval & XER_CA);
  1981. goto logical_done;
  1982. case 826: /* sradi with sh_5 = 0 */
  1983. case 827: /* sradi with sh_5 = 1 */
  1984. op->type = COMPUTE + SETREG + SETXER;
  1985. sh = rb | ((word & 2) << 4);
  1986. ival = (signed long int) regs->gpr[rd];
  1987. op->val = ival >> sh;
  1988. op->xerval = regs->xer;
  1989. if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
  1990. op->xerval |= XER_CA;
  1991. else
  1992. op->xerval &= ~XER_CA;
  1993. set_ca32(op, op->xerval & XER_CA);
  1994. goto logical_done;
  1995. case 890: /* extswsli with sh_5 = 0 */
  1996. case 891: /* extswsli with sh_5 = 1 */
  1997. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  1998. goto unknown_opcode;
  1999. op->type = COMPUTE + SETREG;
  2000. sh = rb | ((word & 2) << 4);
  2001. val = (signed int) regs->gpr[rd];
  2002. if (sh)
  2003. op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
  2004. else
  2005. op->val = val;
  2006. goto logical_done;
  2007. #endif /* __powerpc64__ */
  2008. /*
  2009. * Cache instructions
  2010. */
  2011. case 54: /* dcbst */
  2012. op->type = MKOP(CACHEOP, DCBST, 0);
  2013. op->ea = xform_ea(word, regs);
  2014. return 0;
  2015. case 86: /* dcbf */
  2016. op->type = MKOP(CACHEOP, DCBF, 0);
  2017. op->ea = xform_ea(word, regs);
  2018. return 0;
  2019. case 246: /* dcbtst */
  2020. op->type = MKOP(CACHEOP, DCBTST, 0);
  2021. op->ea = xform_ea(word, regs);
  2022. op->reg = rd;
  2023. return 0;
  2024. case 278: /* dcbt */
2025. op->type = MKOP(CACHEOP, DCBT, 0);
  2026. op->ea = xform_ea(word, regs);
  2027. op->reg = rd;
  2028. return 0;
  2029. case 982: /* icbi */
  2030. op->type = MKOP(CACHEOP, ICBI, 0);
  2031. op->ea = xform_ea(word, regs);
  2032. return 0;
  2033. case 1014: /* dcbz */
  2034. op->type = MKOP(CACHEOP, DCBZ, 0);
  2035. op->ea = xform_ea(word, regs);
  2036. return 0;
  2037. }
  2038. break;
  2039. }
  2040. /*
  2041. * Loads and stores.
  2042. */
  2043. op->type = UNKNOWN;
  2044. op->update_reg = ra;
  2045. op->reg = rd;
  2046. op->val = regs->gpr[rd];
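/*
* For the D-form opcodes that have update variants, the update
* form is the odd opcode (lwzu = 33 = lwz + 1), so the low bit of
* the primary opcode lands on the UPDATE flag after the shift.
*/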
  2047. u = (word >> 20) & UPDATE;
  2048. op->vsx_flags = 0;
  2049. switch (opcode) {
  2050. case 31:
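/*
* X-form update variants are base + 32 in the minor opcode
* (lwzux = 55 = lwzx + 32), which is bit 0x40 of the word and so
* coincides with the UPDATE flag directly.
*/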
  2051. u = word & UPDATE;
  2052. op->ea = xform_ea(word, regs);
  2053. switch ((word >> 1) & 0x3ff) {
  2054. case 20: /* lwarx */
  2055. op->type = MKOP(LARX, 0, 4);
  2056. break;
  2057. case 150: /* stwcx. */
  2058. op->type = MKOP(STCX, 0, 4);
  2059. break;
  2060. #ifdef __powerpc64__
  2061. case 84: /* ldarx */
  2062. op->type = MKOP(LARX, 0, 8);
  2063. break;
  2064. case 214: /* stdcx. */
  2065. op->type = MKOP(STCX, 0, 8);
  2066. break;
  2067. case 52: /* lbarx */
  2068. op->type = MKOP(LARX, 0, 1);
  2069. break;
  2070. case 694: /* stbcx. */
  2071. op->type = MKOP(STCX, 0, 1);
  2072. break;
  2073. case 116: /* lharx */
  2074. op->type = MKOP(LARX, 0, 2);
  2075. break;
  2076. case 726: /* sthcx. */
  2077. op->type = MKOP(STCX, 0, 2);
  2078. break;
  2079. case 276: /* lqarx */
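/* rd must be even and distinct from ra and rb, else the form is invalid */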
  2080. if (!((rd & 1) || rd == ra || rd == rb))
  2081. op->type = MKOP(LARX, 0, 16);
  2082. break;
  2083. case 182: /* stqcx. */
  2084. if (!(rd & 1))
  2085. op->type = MKOP(STCX, 0, 16);
  2086. break;
  2087. #endif
  2088. case 23: /* lwzx */
  2089. case 55: /* lwzux */
  2090. op->type = MKOP(LOAD, u, 4);
  2091. break;
  2092. case 87: /* lbzx */
  2093. case 119: /* lbzux */
  2094. op->type = MKOP(LOAD, u, 1);
  2095. break;
  2096. #ifdef CONFIG_ALTIVEC
  2097. /*
  2098. * Note: for the load/store vector element instructions,
  2099. * bits of the EA say which field of the VMX register to use.
  2100. */
  2101. case 7: /* lvebx */
  2102. op->type = MKOP(LOAD_VMX, 0, 1);
  2103. op->element_size = 1;
  2104. break;
  2105. case 39: /* lvehx */
  2106. op->type = MKOP(LOAD_VMX, 0, 2);
  2107. op->element_size = 2;
  2108. break;
  2109. case 71: /* lvewx */
  2110. op->type = MKOP(LOAD_VMX, 0, 4);
  2111. op->element_size = 4;
  2112. break;
  2113. case 103: /* lvx */
  2114. case 359: /* lvxl */
  2115. op->type = MKOP(LOAD_VMX, 0, 16);
  2116. op->element_size = 16;
  2117. break;
  2118. case 135: /* stvebx */
  2119. op->type = MKOP(STORE_VMX, 0, 1);
  2120. op->element_size = 1;
  2121. break;
  2122. case 167: /* stvehx */
  2123. op->type = MKOP(STORE_VMX, 0, 2);
  2124. op->element_size = 2;
  2125. break;
  2126. case 199: /* stvewx */
  2127. op->type = MKOP(STORE_VMX, 0, 4);
  2128. op->element_size = 4;
  2129. break;
  2130. case 231: /* stvx */
  2131. case 487: /* stvxl */
2132. op->type = MKOP(STORE_VMX, 0, 16);
op->element_size = 16;
2133. break;
  2134. #endif /* CONFIG_ALTIVEC */
  2135. #ifdef __powerpc64__
  2136. case 21: /* ldx */
  2137. case 53: /* ldux */
  2138. op->type = MKOP(LOAD, u, 8);
  2139. break;
  2140. case 149: /* stdx */
  2141. case 181: /* stdux */
  2142. op->type = MKOP(STORE, u, 8);
  2143. break;
  2144. #endif
  2145. case 151: /* stwx */
  2146. case 183: /* stwux */
  2147. op->type = MKOP(STORE, u, 4);
  2148. break;
  2149. case 215: /* stbx */
  2150. case 247: /* stbux */
  2151. op->type = MKOP(STORE, u, 1);
  2152. break;
  2153. case 279: /* lhzx */
  2154. case 311: /* lhzux */
  2155. op->type = MKOP(LOAD, u, 2);
  2156. break;
  2157. #ifdef __powerpc64__
  2158. case 341: /* lwax */
  2159. case 373: /* lwaux */
  2160. op->type = MKOP(LOAD, SIGNEXT | u, 4);
  2161. break;
  2162. #endif
  2163. case 343: /* lhax */
  2164. case 375: /* lhaux */
  2165. op->type = MKOP(LOAD, SIGNEXT | u, 2);
  2166. break;
  2167. case 407: /* sthx */
  2168. case 439: /* sthux */
  2169. op->type = MKOP(STORE, u, 2);
  2170. break;
  2171. #ifdef __powerpc64__
  2172. case 532: /* ldbrx */
  2173. op->type = MKOP(LOAD, BYTEREV, 8);
  2174. break;
  2175. #endif
  2176. case 533: /* lswx */
  2177. op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
  2178. break;
  2179. case 534: /* lwbrx */
  2180. op->type = MKOP(LOAD, BYTEREV, 4);
  2181. break;
  2182. case 597: /* lswi */
  2183. if (rb == 0)
  2184. rb = 32; /* # bytes to load */
  2185. op->type = MKOP(LOAD_MULTI, 0, rb);
  2186. op->ea = ra ? regs->gpr[ra] : 0;
  2187. break;
  2188. #ifdef CONFIG_PPC_FPU
  2189. case 535: /* lfsx */
  2190. case 567: /* lfsux */
  2191. op->type = MKOP(LOAD_FP, u | FPCONV, 4);
  2192. break;
  2193. case 599: /* lfdx */
  2194. case 631: /* lfdux */
  2195. op->type = MKOP(LOAD_FP, u, 8);
  2196. break;
  2197. case 663: /* stfsx */
  2198. case 695: /* stfsux */
  2199. op->type = MKOP(STORE_FP, u | FPCONV, 4);
  2200. break;
  2201. case 727: /* stfdx */
  2202. case 759: /* stfdux */
  2203. op->type = MKOP(STORE_FP, u, 8);
  2204. break;
  2205. #ifdef __powerpc64__
  2206. case 791: /* lfdpx */
  2207. op->type = MKOP(LOAD_FP, 0, 16);
  2208. break;
  2209. case 855: /* lfiwax */
  2210. op->type = MKOP(LOAD_FP, SIGNEXT, 4);
  2211. break;
  2212. case 887: /* lfiwzx */
  2213. op->type = MKOP(LOAD_FP, 0, 4);
  2214. break;
  2215. case 919: /* stfdpx */
  2216. op->type = MKOP(STORE_FP, 0, 16);
  2217. break;
  2218. case 983: /* stfiwx */
  2219. op->type = MKOP(STORE_FP, 0, 4);
  2220. break;
2221. #endif /* __powerpc64__ */
  2222. #endif /* CONFIG_PPC_FPU */
  2223. #ifdef __powerpc64__
  2224. case 660: /* stdbrx */
  2225. op->type = MKOP(STORE, BYTEREV, 8);
  2226. op->val = byterev_8(regs->gpr[rd]);
  2227. break;
  2228. #endif
  2229. case 661: /* stswx */
  2230. op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
  2231. break;
  2232. case 662: /* stwbrx */
  2233. op->type = MKOP(STORE, BYTEREV, 4);
  2234. op->val = byterev_4(regs->gpr[rd]);
  2235. break;
  2236. case 725: /* stswi */
  2237. if (rb == 0)
  2238. rb = 32; /* # bytes to store */
  2239. op->type = MKOP(STORE_MULTI, 0, rb);
  2240. op->ea = ra ? regs->gpr[ra] : 0;
  2241. break;
  2242. case 790: /* lhbrx */
  2243. op->type = MKOP(LOAD, BYTEREV, 2);
  2244. break;
  2245. case 918: /* sthbrx */
  2246. op->type = MKOP(STORE, BYTEREV, 2);
  2247. op->val = byterev_2(regs->gpr[rd]);
  2248. break;
  2249. #ifdef CONFIG_VSX
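/*
* VSX instructions address 64 registers: the 6-bit XT/XS field is
* the 5-bit T field plus a TX bit in word bit 0, recombined below
* as rd | ((word & 1) << 5).
*/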
  2250. case 12: /* lxsiwzx */
  2251. op->reg = rd | ((word & 1) << 5);
  2252. op->type = MKOP(LOAD_VSX, 0, 4);
  2253. op->element_size = 8;
  2254. break;
  2255. case 76: /* lxsiwax */
  2256. op->reg = rd | ((word & 1) << 5);
  2257. op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
  2258. op->element_size = 8;
  2259. break;
  2260. case 140: /* stxsiwx */
  2261. op->reg = rd | ((word & 1) << 5);
  2262. op->type = MKOP(STORE_VSX, 0, 4);
  2263. op->element_size = 8;
  2264. break;
  2265. case 268: /* lxvx */
  2266. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2267. goto unknown_opcode;
  2268. op->reg = rd | ((word & 1) << 5);
  2269. op->type = MKOP(LOAD_VSX, 0, 16);
  2270. op->element_size = 16;
  2271. op->vsx_flags = VSX_CHECK_VEC;
  2272. break;
  2273. case 269: /* lxvl */
  2274. case 301: { /* lxvll */
  2275. int nb;
  2276. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2277. goto unknown_opcode;
  2278. op->reg = rd | ((word & 1) << 5);
  2279. op->ea = ra ? regs->gpr[ra] : 0;
  2280. nb = regs->gpr[rb] & 0xff;
  2281. if (nb > 16)
  2282. nb = 16;
  2283. op->type = MKOP(LOAD_VSX, 0, nb);
  2284. op->element_size = 16;
  2285. op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
  2286. VSX_CHECK_VEC;
  2287. break;
  2288. }
  2289. case 332: /* lxvdsx */
  2290. op->reg = rd | ((word & 1) << 5);
  2291. op->type = MKOP(LOAD_VSX, 0, 8);
  2292. op->element_size = 8;
  2293. op->vsx_flags = VSX_SPLAT;
  2294. break;
  2295. case 333: /* lxvpx */
  2296. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  2297. goto unknown_opcode;
  2298. op->reg = VSX_REGISTER_XTP(rd);
  2299. op->type = MKOP(LOAD_VSX, 0, 32);
  2300. op->element_size = 32;
  2301. break;
  2302. case 364: /* lxvwsx */
  2303. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2304. goto unknown_opcode;
  2305. op->reg = rd | ((word & 1) << 5);
  2306. op->type = MKOP(LOAD_VSX, 0, 4);
  2307. op->element_size = 4;
  2308. op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
  2309. break;
  2310. case 396: /* stxvx */
  2311. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2312. goto unknown_opcode;
  2313. op->reg = rd | ((word & 1) << 5);
  2314. op->type = MKOP(STORE_VSX, 0, 16);
  2315. op->element_size = 16;
  2316. op->vsx_flags = VSX_CHECK_VEC;
  2317. break;
  2318. case 397: /* stxvl */
  2319. case 429: { /* stxvll */
  2320. int nb;
  2321. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2322. goto unknown_opcode;
  2323. op->reg = rd | ((word & 1) << 5);
  2324. op->ea = ra ? regs->gpr[ra] : 0;
  2325. nb = regs->gpr[rb] & 0xff;
  2326. if (nb > 16)
  2327. nb = 16;
  2328. op->type = MKOP(STORE_VSX, 0, nb);
  2329. op->element_size = 16;
  2330. op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
  2331. VSX_CHECK_VEC;
  2332. break;
  2333. }
  2334. case 461: /* stxvpx */
  2335. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  2336. goto unknown_opcode;
  2337. op->reg = VSX_REGISTER_XTP(rd);
  2338. op->type = MKOP(STORE_VSX, 0, 32);
  2339. op->element_size = 32;
  2340. break;
  2341. case 524: /* lxsspx */
  2342. op->reg = rd | ((word & 1) << 5);
  2343. op->type = MKOP(LOAD_VSX, 0, 4);
  2344. op->element_size = 8;
  2345. op->vsx_flags = VSX_FPCONV;
  2346. break;
  2347. case 588: /* lxsdx */
  2348. op->reg = rd | ((word & 1) << 5);
  2349. op->type = MKOP(LOAD_VSX, 0, 8);
  2350. op->element_size = 8;
  2351. break;
  2352. case 652: /* stxsspx */
  2353. op->reg = rd | ((word & 1) << 5);
  2354. op->type = MKOP(STORE_VSX, 0, 4);
  2355. op->element_size = 8;
  2356. op->vsx_flags = VSX_FPCONV;
  2357. break;
  2358. case 716: /* stxsdx */
  2359. op->reg = rd | ((word & 1) << 5);
  2360. op->type = MKOP(STORE_VSX, 0, 8);
  2361. op->element_size = 8;
  2362. break;
  2363. case 780: /* lxvw4x */
  2364. op->reg = rd | ((word & 1) << 5);
  2365. op->type = MKOP(LOAD_VSX, 0, 16);
  2366. op->element_size = 4;
  2367. break;
  2368. case 781: /* lxsibzx */
  2369. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2370. goto unknown_opcode;
  2371. op->reg = rd | ((word & 1) << 5);
  2372. op->type = MKOP(LOAD_VSX, 0, 1);
  2373. op->element_size = 8;
  2374. op->vsx_flags = VSX_CHECK_VEC;
  2375. break;
  2376. case 812: /* lxvh8x */
  2377. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2378. goto unknown_opcode;
  2379. op->reg = rd | ((word & 1) << 5);
  2380. op->type = MKOP(LOAD_VSX, 0, 16);
  2381. op->element_size = 2;
  2382. op->vsx_flags = VSX_CHECK_VEC;
  2383. break;
  2384. case 813: /* lxsihzx */
  2385. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2386. goto unknown_opcode;
  2387. op->reg = rd | ((word & 1) << 5);
  2388. op->type = MKOP(LOAD_VSX, 0, 2);
  2389. op->element_size = 8;
  2390. op->vsx_flags = VSX_CHECK_VEC;
  2391. break;
  2392. case 844: /* lxvd2x */
  2393. op->reg = rd | ((word & 1) << 5);
  2394. op->type = MKOP(LOAD_VSX, 0, 16);
  2395. op->element_size = 8;
  2396. break;
  2397. case 876: /* lxvb16x */
  2398. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2399. goto unknown_opcode;
  2400. op->reg = rd | ((word & 1) << 5);
  2401. op->type = MKOP(LOAD_VSX, 0, 16);
  2402. op->element_size = 1;
  2403. op->vsx_flags = VSX_CHECK_VEC;
  2404. break;
  2405. case 908: /* stxvw4x */
  2406. op->reg = rd | ((word & 1) << 5);
  2407. op->type = MKOP(STORE_VSX, 0, 16);
  2408. op->element_size = 4;
  2409. break;
  2410. case 909: /* stxsibx */
  2411. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2412. goto unknown_opcode;
  2413. op->reg = rd | ((word & 1) << 5);
  2414. op->type = MKOP(STORE_VSX, 0, 1);
  2415. op->element_size = 8;
  2416. op->vsx_flags = VSX_CHECK_VEC;
  2417. break;
  2418. case 940: /* stxvh8x */
  2419. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2420. goto unknown_opcode;
  2421. op->reg = rd | ((word & 1) << 5);
  2422. op->type = MKOP(STORE_VSX, 0, 16);
  2423. op->element_size = 2;
  2424. op->vsx_flags = VSX_CHECK_VEC;
  2425. break;
  2426. case 941: /* stxsihx */
  2427. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2428. goto unknown_opcode;
  2429. op->reg = rd | ((word & 1) << 5);
  2430. op->type = MKOP(STORE_VSX, 0, 2);
  2431. op->element_size = 8;
  2432. op->vsx_flags = VSX_CHECK_VEC;
  2433. break;
  2434. case 972: /* stxvd2x */
  2435. op->reg = rd | ((word & 1) << 5);
  2436. op->type = MKOP(STORE_VSX, 0, 16);
  2437. op->element_size = 8;
  2438. break;
  2439. case 1004: /* stxvb16x */
  2440. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2441. goto unknown_opcode;
  2442. op->reg = rd | ((word & 1) << 5);
  2443. op->type = MKOP(STORE_VSX, 0, 16);
  2444. op->element_size = 1;
  2445. op->vsx_flags = VSX_CHECK_VEC;
  2446. break;
  2447. #endif /* CONFIG_VSX */
  2448. }
  2449. break;
  2450. case 32: /* lwz */
  2451. case 33: /* lwzu */
  2452. op->type = MKOP(LOAD, u, 4);
  2453. op->ea = dform_ea(word, regs);
  2454. break;
  2455. case 34: /* lbz */
  2456. case 35: /* lbzu */
  2457. op->type = MKOP(LOAD, u, 1);
  2458. op->ea = dform_ea(word, regs);
  2459. break;
  2460. case 36: /* stw */
  2461. case 37: /* stwu */
  2462. op->type = MKOP(STORE, u, 4);
  2463. op->ea = dform_ea(word, regs);
  2464. break;
  2465. case 38: /* stb */
  2466. case 39: /* stbu */
  2467. op->type = MKOP(STORE, u, 1);
  2468. op->ea = dform_ea(word, regs);
  2469. break;
  2470. case 40: /* lhz */
  2471. case 41: /* lhzu */
  2472. op->type = MKOP(LOAD, u, 2);
  2473. op->ea = dform_ea(word, regs);
  2474. break;
  2475. case 42: /* lha */
  2476. case 43: /* lhau */
  2477. op->type = MKOP(LOAD, SIGNEXT | u, 2);
  2478. op->ea = dform_ea(word, regs);
  2479. break;
  2480. case 44: /* sth */
  2481. case 45: /* sthu */
  2482. op->type = MKOP(STORE, u, 2);
  2483. op->ea = dform_ea(word, regs);
  2484. break;
  2485. case 46: /* lmw */
  2486. if (ra >= rd)
  2487. break; /* invalid form, ra in range to load */
  2488. op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
  2489. op->ea = dform_ea(word, regs);
  2490. break;
  2491. case 47: /* stmw */
  2492. op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
  2493. op->ea = dform_ea(word, regs);
  2494. break;
  2495. #ifdef CONFIG_PPC_FPU
  2496. case 48: /* lfs */
  2497. case 49: /* lfsu */
  2498. op->type = MKOP(LOAD_FP, u | FPCONV, 4);
  2499. op->ea = dform_ea(word, regs);
  2500. break;
  2501. case 50: /* lfd */
  2502. case 51: /* lfdu */
  2503. op->type = MKOP(LOAD_FP, u, 8);
  2504. op->ea = dform_ea(word, regs);
  2505. break;
  2506. case 52: /* stfs */
  2507. case 53: /* stfsu */
  2508. op->type = MKOP(STORE_FP, u | FPCONV, 4);
  2509. op->ea = dform_ea(word, regs);
  2510. break;
  2511. case 54: /* stfd */
  2512. case 55: /* stfdu */
  2513. op->type = MKOP(STORE_FP, u, 8);
  2514. op->ea = dform_ea(word, regs);
  2515. break;
  2516. #endif
  2517. #ifdef __powerpc64__
  2518. case 56: /* lq */
  2519. if (!((rd & 1) || (rd == ra)))
  2520. op->type = MKOP(LOAD, 0, 16);
  2521. op->ea = dqform_ea(word, regs);
  2522. break;
  2523. #endif
  2524. #ifdef CONFIG_VSX
  2525. case 57: /* lfdp, lxsd, lxssp */
  2526. op->ea = dsform_ea(word, regs);
  2527. switch (word & 3) {
  2528. case 0: /* lfdp */
  2529. if (rd & 1)
  2530. break; /* reg must be even */
  2531. op->type = MKOP(LOAD_FP, 0, 16);
  2532. break;
  2533. case 2: /* lxsd */
  2534. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2535. goto unknown_opcode;
  2536. op->reg = rd + 32;
  2537. op->type = MKOP(LOAD_VSX, 0, 8);
  2538. op->element_size = 8;
  2539. op->vsx_flags = VSX_CHECK_VEC;
  2540. break;
  2541. case 3: /* lxssp */
  2542. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2543. goto unknown_opcode;
  2544. op->reg = rd + 32;
  2545. op->type = MKOP(LOAD_VSX, 0, 4);
  2546. op->element_size = 8;
  2547. op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
  2548. break;
  2549. }
  2550. break;
  2551. #endif /* CONFIG_VSX */
  2552. #ifdef __powerpc64__
  2553. case 58: /* ld[u], lwa */
  2554. op->ea = dsform_ea(word, regs);
  2555. switch (word & 3) {
  2556. case 0: /* ld */
  2557. op->type = MKOP(LOAD, 0, 8);
  2558. break;
  2559. case 1: /* ldu */
  2560. op->type = MKOP(LOAD, UPDATE, 8);
  2561. break;
  2562. case 2: /* lwa */
  2563. op->type = MKOP(LOAD, SIGNEXT, 4);
  2564. break;
  2565. }
  2566. break;
  2567. #endif
  2568. #ifdef CONFIG_VSX
  2569. case 6:
  2570. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  2571. goto unknown_opcode;
  2572. op->ea = dqform_ea(word, regs);
  2573. op->reg = VSX_REGISTER_XTP(rd);
  2574. op->element_size = 32;
  2575. switch (word & 0xf) {
  2576. case 0: /* lxvp */
  2577. op->type = MKOP(LOAD_VSX, 0, 32);
  2578. break;
  2579. case 1: /* stxvp */
  2580. op->type = MKOP(STORE_VSX, 0, 32);
  2581. break;
  2582. }
  2583. break;
  2584. case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
  2585. switch (word & 7) {
  2586. case 0: /* stfdp with LSB of DS field = 0 */
  2587. case 4: /* stfdp with LSB of DS field = 1 */
  2588. op->ea = dsform_ea(word, regs);
  2589. op->type = MKOP(STORE_FP, 0, 16);
  2590. break;
  2591. case 1: /* lxv */
  2592. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2593. goto unknown_opcode;
  2594. op->ea = dqform_ea(word, regs);
  2595. if (word & 8)
  2596. op->reg = rd + 32;
  2597. op->type = MKOP(LOAD_VSX, 0, 16);
  2598. op->element_size = 16;
  2599. op->vsx_flags = VSX_CHECK_VEC;
  2600. break;
  2601. case 2: /* stxsd with LSB of DS field = 0 */
  2602. case 6: /* stxsd with LSB of DS field = 1 */
  2603. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2604. goto unknown_opcode;
  2605. op->ea = dsform_ea(word, regs);
  2606. op->reg = rd + 32;
  2607. op->type = MKOP(STORE_VSX, 0, 8);
  2608. op->element_size = 8;
  2609. op->vsx_flags = VSX_CHECK_VEC;
  2610. break;
  2611. case 3: /* stxssp with LSB of DS field = 0 */
  2612. case 7: /* stxssp with LSB of DS field = 1 */
  2613. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2614. goto unknown_opcode;
  2615. op->ea = dsform_ea(word, regs);
  2616. op->reg = rd + 32;
  2617. op->type = MKOP(STORE_VSX, 0, 4);
  2618. op->element_size = 8;
  2619. op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
  2620. break;
  2621. case 5: /* stxv */
  2622. if (!cpu_has_feature(CPU_FTR_ARCH_300))
  2623. goto unknown_opcode;
  2624. op->ea = dqform_ea(word, regs);
  2625. if (word & 8)
  2626. op->reg = rd + 32;
  2627. op->type = MKOP(STORE_VSX, 0, 16);
  2628. op->element_size = 16;
  2629. op->vsx_flags = VSX_CHECK_VEC;
  2630. break;
  2631. }
  2632. break;
  2633. #endif /* CONFIG_VSX */
  2634. #ifdef __powerpc64__
  2635. case 62: /* std[u] */
  2636. op->ea = dsform_ea(word, regs);
  2637. switch (word & 3) {
  2638. case 0: /* std */
  2639. op->type = MKOP(STORE, 0, 8);
  2640. break;
  2641. case 1: /* stdu */
  2642. op->type = MKOP(STORE, UPDATE, 8);
  2643. break;
  2644. case 2: /* stq */
  2645. if (!(rd & 1))
  2646. op->type = MKOP(STORE, 0, 16);
  2647. break;
  2648. }
  2649. break;
  2650. case 1: /* Prefixed instructions */
  2651. if (!cpu_has_feature(CPU_FTR_ARCH_31))
  2652. goto unknown_opcode;
  2653. prefix_r = GET_PREFIX_R(word);
  2654. ra = GET_PREFIX_RA(suffix);
  2655. op->update_reg = ra;
  2656. rd = (suffix >> 21) & 0x1f;
  2657. op->reg = rd;
  2658. op->val = regs->gpr[rd];
  2659. suffixopcode = get_op(suffix);
  2660. prefixtype = (word >> 24) & 0x3;
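/*
* Prefixed load/store EAs add a 34-bit displacement (18 prefix
* bits, 16 suffix bits) to (ra|0), or to NIP when the R bit is
* set; R = 1 together with ra != 0 is an invalid form and is left
* undecoded below.
*/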
  2661. switch (prefixtype) {
  2662. case 0: /* Type 00 Eight-Byte Load/Store */
  2663. if (prefix_r && ra)
  2664. break;
  2665. op->ea = mlsd_8lsd_ea(word, suffix, regs);
  2666. switch (suffixopcode) {
  2667. case 41: /* plwa */
  2668. op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
  2669. break;
  2670. #ifdef CONFIG_VSX
  2671. case 42: /* plxsd */
  2672. op->reg = rd + 32;
  2673. op->type = MKOP(LOAD_VSX, PREFIXED, 8);
  2674. op->element_size = 8;
  2675. op->vsx_flags = VSX_CHECK_VEC;
  2676. break;
  2677. case 43: /* plxssp */
  2678. op->reg = rd + 32;
  2679. op->type = MKOP(LOAD_VSX, PREFIXED, 4);
  2680. op->element_size = 8;
  2681. op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
  2682. break;
  2683. case 46: /* pstxsd */
  2684. op->reg = rd + 32;
  2685. op->type = MKOP(STORE_VSX, PREFIXED, 8);
  2686. op->element_size = 8;
  2687. op->vsx_flags = VSX_CHECK_VEC;
  2688. break;
  2689. case 47: /* pstxssp */
  2690. op->reg = rd + 32;
  2691. op->type = MKOP(STORE_VSX, PREFIXED, 4);
  2692. op->element_size = 8;
  2693. op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
  2694. break;
  2695. case 51: /* plxv1 */
  2696. op->reg += 32;
  2697. fallthrough;
  2698. case 50: /* plxv0 */
  2699. op->type = MKOP(LOAD_VSX, PREFIXED, 16);
  2700. op->element_size = 16;
  2701. op->vsx_flags = VSX_CHECK_VEC;
  2702. break;
  2703. case 55: /* pstxv1 */
  2704. op->reg = rd + 32;
  2705. fallthrough;
  2706. case 54: /* pstxv0 */
  2707. op->type = MKOP(STORE_VSX, PREFIXED, 16);
  2708. op->element_size = 16;
  2709. op->vsx_flags = VSX_CHECK_VEC;
  2710. break;
  2711. #endif /* CONFIG_VSX */
  2712. case 56: /* plq */
  2713. op->type = MKOP(LOAD, PREFIXED, 16);
  2714. break;
  2715. case 57: /* pld */
  2716. op->type = MKOP(LOAD, PREFIXED, 8);
  2717. break;
  2718. #ifdef CONFIG_VSX
  2719. case 58: /* plxvp */
  2720. op->reg = VSX_REGISTER_XTP(rd);
  2721. op->type = MKOP(LOAD_VSX, PREFIXED, 32);
  2722. op->element_size = 32;
  2723. break;
  2724. #endif /* CONFIG_VSX */
  2725. case 60: /* pstq */
  2726. op->type = MKOP(STORE, PREFIXED, 16);
  2727. break;
  2728. case 61: /* pstd */
  2729. op->type = MKOP(STORE, PREFIXED, 8);
  2730. break;
  2731. #ifdef CONFIG_VSX
  2732. case 62: /* pstxvp */
  2733. op->reg = VSX_REGISTER_XTP(rd);
  2734. op->type = MKOP(STORE_VSX, PREFIXED, 32);
  2735. op->element_size = 32;
  2736. break;
  2737. #endif /* CONFIG_VSX */
  2738. }
  2739. break;
  2740. case 1: /* Type 01 Eight-Byte Register-to-Register */
  2741. break;
  2742. case 2: /* Type 10 Modified Load/Store */
  2743. if (prefix_r && ra)
  2744. break;
  2745. op->ea = mlsd_8lsd_ea(word, suffix, regs);
  2746. switch (suffixopcode) {
  2747. case 32: /* plwz */
  2748. op->type = MKOP(LOAD, PREFIXED, 4);
  2749. break;
  2750. case 34: /* plbz */
  2751. op->type = MKOP(LOAD, PREFIXED, 1);
  2752. break;
  2753. case 36: /* pstw */
  2754. op->type = MKOP(STORE, PREFIXED, 4);
  2755. break;
  2756. case 38: /* pstb */
  2757. op->type = MKOP(STORE, PREFIXED, 1);
  2758. break;
  2759. case 40: /* plhz */
  2760. op->type = MKOP(LOAD, PREFIXED, 2);
  2761. break;
  2762. case 42: /* plha */
  2763. op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
  2764. break;
  2765. case 44: /* psth */
  2766. op->type = MKOP(STORE, PREFIXED, 2);
  2767. break;
  2768. case 48: /* plfs */
  2769. op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
  2770. break;
  2771. case 50: /* plfd */
  2772. op->type = MKOP(LOAD_FP, PREFIXED, 8);
  2773. break;
  2774. case 52: /* pstfs */
  2775. op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
  2776. break;
  2777. case 54: /* pstfd */
  2778. op->type = MKOP(STORE_FP, PREFIXED, 8);
  2779. break;
  2780. }
  2781. break;
  2782. case 3: /* Type 11 Modified Register-to-Register */
  2783. break;
  2784. }
  2785. #endif /* __powerpc64__ */
  2786. }
  2787. if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
  2788. switch (GETTYPE(op->type)) {
  2789. case LOAD:
  2790. if (ra == rd)
  2791. goto unknown_opcode;
  2792. fallthrough;
  2793. case STORE:
  2794. case LOAD_FP:
  2795. case STORE_FP:
  2796. if (ra == 0)
  2797. goto unknown_opcode;
  2798. }
  2799. }
  2800. #ifdef CONFIG_VSX
  2801. if ((GETTYPE(op->type) == LOAD_VSX ||
  2802. GETTYPE(op->type) == STORE_VSX) &&
  2803. !cpu_has_feature(CPU_FTR_VSX)) {
  2804. return -1;
  2805. }
  2806. #endif /* CONFIG_VSX */
  2807. return 0;
  2808. unknown_opcode:
  2809. op->type = UNKNOWN;
  2810. return 0;
  2811. logical_done:
  2812. if (word & 1)
  2813. set_cr0(regs, op);
  2814. logical_done_nocc:
  2815. op->reg = ra;
  2816. op->type |= SETREG;
  2817. return 1;
  2818. arith_done:
  2819. if (word & 1)
  2820. set_cr0(regs, op);
  2821. compute_done:
  2822. op->reg = rd;
  2823. op->type |= SETREG;
  2824. return 1;
  2825. priv:
  2826. op->type = INTERRUPT | 0x700;
  2827. op->val = SRR1_PROGPRIV;
  2828. return 0;
  2829. trap:
  2830. op->type = INTERRUPT | 0x700;
  2831. op->val = SRR1_PROGTRAP;
  2832. return 0;
  2833. }
  2834. EXPORT_SYMBOL_GPL(analyse_instr);
  2835. NOKPROBE_SYMBOL(analyse_instr);
2836. /*
2837. * On PPC32 the stack pointer is always changed with stwu on r1,
2838. * so this emulated store could corrupt the exception frame
2839. * trampoline that we push below the kprobed function's stack.
2840. * We therefore only update gpr[1] here and skip the real store;
2841. * the exception return code checks this flag and performs the
2842. * store safely.
2843. */
  2844. static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
  2845. {
2846. /*
2847. * Warn if the flag is already set, since that would mean
2848. * losing the previous value.
2849. */
  2850. WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
  2851. set_thread_flag(TIF_EMULATE_STACK_STORE);
  2852. return 0;
  2853. }
  2854. static nokprobe_inline void do_signext(unsigned long *valp, int size)
  2855. {
  2856. switch (size) {
  2857. case 2:
  2858. *valp = (signed short) *valp;
  2859. break;
  2860. case 4:
  2861. *valp = (signed int) *valp;
  2862. break;
  2863. }
  2864. }
  2865. static nokprobe_inline void do_byterev(unsigned long *valp, int size)
  2866. {
  2867. switch (size) {
  2868. case 2:
  2869. *valp = byterev_2(*valp);
  2870. break;
  2871. case 4:
  2872. *valp = byterev_4(*valp);
  2873. break;
  2874. #ifdef __powerpc64__
  2875. case 8:
  2876. *valp = byterev_8(*valp);
  2877. break;
  2878. #endif
  2879. }
  2880. }
  2881. /*
  2882. * Emulate an instruction that can be executed just by updating
  2883. * fields in *regs.
  2884. */
  2885. void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
  2886. {
  2887. unsigned long next_pc;
  2888. next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
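/* GETLENGTH() is 8 for prefixed instructions, 4 for all others */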
  2889. switch (GETTYPE(op->type)) {
  2890. case COMPUTE:
  2891. if (op->type & SETREG)
  2892. regs->gpr[op->reg] = op->val;
  2893. if (op->type & SETCC)
  2894. regs->ccr = op->ccval;
  2895. if (op->type & SETXER)
  2896. regs->xer = op->xerval;
  2897. break;
  2898. case BRANCH:
  2899. if (op->type & SETLK)
  2900. regs->link = next_pc;
  2901. if (op->type & BRTAKEN)
  2902. next_pc = op->val;
  2903. if (op->type & DECCTR)
  2904. --regs->ctr;
  2905. break;
  2906. case BARRIER:
  2907. switch (op->type & BARRIER_MASK) {
  2908. case BARRIER_SYNC:
  2909. mb();
  2910. break;
  2911. case BARRIER_ISYNC:
  2912. isync();
  2913. break;
  2914. case BARRIER_EIEIO:
  2915. eieio();
  2916. break;
  2917. #ifdef CONFIG_PPC64
  2918. case BARRIER_LWSYNC:
  2919. asm volatile("lwsync" : : : "memory");
  2920. break;
  2921. case BARRIER_PTESYNC:
  2922. asm volatile("ptesync" : : : "memory");
  2923. break;
  2924. #endif
  2925. }
  2926. break;
  2927. case MFSPR:
  2928. switch (op->spr) {
  2929. case SPRN_XER:
  2930. regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
  2931. break;
  2932. case SPRN_LR:
  2933. regs->gpr[op->reg] = regs->link;
  2934. break;
  2935. case SPRN_CTR:
  2936. regs->gpr[op->reg] = regs->ctr;
  2937. break;
  2938. default:
  2939. WARN_ON_ONCE(1);
  2940. }
  2941. break;
  2942. case MTSPR:
  2943. switch (op->spr) {
  2944. case SPRN_XER:
  2945. regs->xer = op->val & 0xffffffffUL;
  2946. break;
  2947. case SPRN_LR:
  2948. regs->link = op->val;
  2949. break;
  2950. case SPRN_CTR:
  2951. regs->ctr = op->val;
  2952. break;
  2953. default:
  2954. WARN_ON_ONCE(1);
  2955. }
  2956. break;
  2957. default:
  2958. WARN_ON_ONCE(1);
  2959. }
  2960. regs_set_return_ip(regs, next_pc);
  2961. }
  2962. NOKPROBE_SYMBOL(emulate_update_regs);
  2963. /*
  2964. * Emulate a previously-analysed load or store instruction.
  2965. * Return values are:
  2966. * 0 = instruction emulated successfully
  2967. * -EFAULT = address out of range or access faulted (regs->dar
  2968. * contains the faulting address)
  2969. * -EACCES = misaligned access, instruction requires alignment
  2970. * -EINVAL = unknown operation in *op
  2971. */
  2972. int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
  2973. {
  2974. int err, size, type;
  2975. int i, rd, nb;
  2976. unsigned int cr;
  2977. unsigned long val;
  2978. unsigned long ea;
  2979. bool cross_endian;
  2980. err = 0;
  2981. size = GETSIZE(op->type);
  2982. type = GETTYPE(op->type);
  2983. cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
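/*
* cross_endian: the emulated context uses the opposite byte order
* from the kernel, so values moved between registers and memory
* must be byte-swapped to preserve the program's view of memory.
*/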
  2984. ea = truncate_if_32bit(regs->msr, op->ea);
  2985. switch (type) {
  2986. case LARX:
  2987. if (ea & (size - 1))
  2988. return -EACCES; /* can't handle misaligned */
  2989. if (!address_ok(regs, ea, size))
  2990. return -EFAULT;
  2991. err = 0;
  2992. val = 0;
  2993. switch (size) {
  2994. #ifdef __powerpc64__
  2995. case 1:
  2996. __get_user_asmx(val, ea, err, "lbarx");
  2997. break;
  2998. case 2:
  2999. __get_user_asmx(val, ea, err, "lharx");
  3000. break;
  3001. #endif
  3002. case 4:
  3003. __get_user_asmx(val, ea, err, "lwarx");
  3004. break;
  3005. #ifdef __powerpc64__
  3006. case 8:
  3007. __get_user_asmx(val, ea, err, "ldarx");
  3008. break;
  3009. case 16:
  3010. err = do_lqarx(ea, &regs->gpr[op->reg]);
  3011. break;
  3012. #endif
  3013. default:
  3014. return -EINVAL;
  3015. }
  3016. if (err) {
  3017. regs->dar = ea;
  3018. break;
  3019. }
  3020. if (size < 16)
  3021. regs->gpr[op->reg] = val;
  3022. break;
  3023. case STCX:
  3024. if (ea & (size - 1))
  3025. return -EACCES; /* can't handle misaligned */
  3026. if (!address_ok(regs, ea, size))
  3027. return -EFAULT;
  3028. err = 0;
  3029. switch (size) {
  3030. #ifdef __powerpc64__
  3031. case 1:
  3032. __put_user_asmx(op->val, ea, err, "stbcx.", cr);
  3033. break;
  3034. case 2:
  3035. __put_user_asmx(op->val, ea, err, "sthcx.", cr);
  3036. break;
  3037. #endif
  3038. case 4:
  3039. __put_user_asmx(op->val, ea, err, "stwcx.", cr);
  3040. break;
  3041. #ifdef __powerpc64__
  3042. case 8:
  3043. __put_user_asmx(op->val, ea, err, "stdcx.", cr);
  3044. break;
  3045. case 16:
  3046. err = do_stqcx(ea, regs->gpr[op->reg],
  3047. regs->gpr[op->reg + 1], &cr);
  3048. break;
  3049. #endif
  3050. default:
  3051. return -EINVAL;
  3052. }
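/*
* On success CR0 is LT/GT/EQ from the asm result plus SO copied
* from XER: bit 0x80000000 shifted right by 3 lands on CR0[SO].
*/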
  3053. if (!err)
  3054. regs->ccr = (regs->ccr & 0x0fffffff) |
  3055. (cr & 0xe0000000) |
  3056. ((regs->xer >> 3) & 0x10000000);
  3057. else
  3058. regs->dar = ea;
  3059. break;
  3060. case LOAD:
  3061. #ifdef __powerpc64__
  3062. if (size == 16) {
  3063. err = emulate_lq(regs, ea, op->reg, cross_endian);
  3064. break;
  3065. }
  3066. #endif
  3067. err = read_mem(&regs->gpr[op->reg], ea, size, regs);
  3068. if (!err) {
  3069. if (op->type & SIGNEXT)
  3070. do_signext(&regs->gpr[op->reg], size);
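/*
* A byte-reversing load in a cross-endian context cancels out, so
* swap only when exactly one of BYTEREV and cross_endian applies.
*/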
  3071. if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
  3072. do_byterev(&regs->gpr[op->reg], size);
  3073. }
  3074. break;
  3075. #ifdef CONFIG_PPC_FPU
  3076. case LOAD_FP:
3077. /*
3078. * If the instruction is in userspace, we can emulate it even if
3079. * the FP/VMX/VSX state is not live, because we have the state
3080. * stored in the thread_struct. If the instruction is in the
3081. * kernel, we must not touch the state in the thread_struct.
3082. */
  3083. if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
  3084. return 0;
  3085. err = do_fp_load(op, ea, regs, cross_endian);
  3086. break;
  3087. #endif
  3088. #ifdef CONFIG_ALTIVEC
  3089. case LOAD_VMX:
  3090. if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
  3091. return 0;
  3092. err = do_vec_load(op->reg, ea, size, regs, cross_endian);
  3093. break;
  3094. #endif
  3095. #ifdef CONFIG_VSX
  3096. case LOAD_VSX: {
  3097. unsigned long msrbit = MSR_VSX;
  3098. /*
  3099. * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
  3100. * when the target of the instruction is a vector register.
  3101. */
  3102. if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
  3103. msrbit = MSR_VEC;
  3104. if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
  3105. return 0;
  3106. err = do_vsx_load(op, ea, regs, cross_endian);
  3107. break;
  3108. }
  3109. #endif
  3110. case LOAD_MULTI:
  3111. if (!address_ok(regs, ea, size))
  3112. return -EFAULT;
  3113. rd = op->reg;
  3114. for (i = 0; i < size; i += 4) {
  3115. unsigned int v32 = 0;
  3116. nb = size - i;
  3117. if (nb > 4)
  3118. nb = 4;
  3119. err = copy_mem_in((u8 *) &v32, ea, nb, regs);
  3120. if (err)
  3121. break;
  3122. if (unlikely(cross_endian))
  3123. v32 = byterev_4(v32);
  3124. regs->gpr[rd] = v32;
  3125. ea += 4;
  3126. /* reg number wraps from 31 to 0 for lsw[ix] */
  3127. rd = (rd + 1) & 0x1f;
  3128. }
  3129. break;
  3130. case STORE:
  3131. #ifdef __powerpc64__
  3132. if (size == 16) {
  3133. err = emulate_stq(regs, ea, op->reg, cross_endian);
  3134. break;
  3135. }
  3136. #endif
  3137. if ((op->type & UPDATE) && size == sizeof(long) &&
  3138. op->reg == 1 && op->update_reg == 1 &&
  3139. !(regs->msr & MSR_PR) &&
  3140. ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
  3141. err = handle_stack_update(ea, regs);
  3142. break;
  3143. }
  3144. if (unlikely(cross_endian))
  3145. do_byterev(&op->val, size);
  3146. err = write_mem(op->val, ea, size, regs);
  3147. break;
  3148. #ifdef CONFIG_PPC_FPU
  3149. case STORE_FP:
  3150. if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
  3151. return 0;
  3152. err = do_fp_store(op, ea, regs, cross_endian);
  3153. break;
  3154. #endif
  3155. #ifdef CONFIG_ALTIVEC
  3156. case STORE_VMX:
  3157. if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
  3158. return 0;
  3159. err = do_vec_store(op->reg, ea, size, regs, cross_endian);
  3160. break;
  3161. #endif
  3162. #ifdef CONFIG_VSX
  3163. case STORE_VSX: {
  3164. unsigned long msrbit = MSR_VSX;
  3165. /*
  3166. * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
  3167. * when the target of the instruction is a vector register.
  3168. */
  3169. if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
  3170. msrbit = MSR_VEC;
  3171. if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
  3172. return 0;
  3173. err = do_vsx_store(op, ea, regs, cross_endian);
  3174. break;
  3175. }
  3176. #endif
  3177. case STORE_MULTI:
  3178. if (!address_ok(regs, ea, size))
  3179. return -EFAULT;
  3180. rd = op->reg;
  3181. for (i = 0; i < size; i += 4) {
  3182. unsigned int v32 = regs->gpr[rd];
  3183. nb = size - i;
  3184. if (nb > 4)
  3185. nb = 4;
  3186. if (unlikely(cross_endian))
  3187. v32 = byterev_4(v32);
  3188. err = copy_mem_out((u8 *) &v32, ea, nb, regs);
  3189. if (err)
  3190. break;
  3191. ea += 4;
  3192. /* reg number wraps from 31 to 0 for stsw[ix] */
  3193. rd = (rd + 1) & 0x1f;
  3194. }
  3195. break;
  3196. default:
  3197. return -EINVAL;
  3198. }
  3199. if (err)
  3200. return err;
  3201. if (op->type & UPDATE)
  3202. regs->gpr[op->update_reg] = op->ea;
  3203. return 0;
  3204. }
  3205. NOKPROBE_SYMBOL(emulate_loadstore);
  3206. /*
  3207. * Emulate instructions that cause a transfer of control,
  3208. * loads and stores, and a few other instructions.
  3209. * Returns 1 if the step was emulated, 0 if not,
  3210. * or -1 if the instruction is one that should not be stepped,
  3211. * such as an rfid, or a mtmsrd that would clear MSR_RI.
  3212. */
  3213. int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
  3214. {
  3215. struct instruction_op op;
  3216. int r, err, type;
  3217. unsigned long val;
  3218. unsigned long ea;
  3219. r = analyse_instr(&op, regs, instr);
  3220. if (r < 0)
  3221. return r;
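/*
* r > 0: the operation was fully decoded and only touches
* registers, so emulate_update_regs() can finish it; r == 0 means
* the op needs memory access or MSR/SPR side effects handled below.
*/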
  3222. if (r > 0) {
  3223. emulate_update_regs(regs, &op);
  3224. return 1;
  3225. }
  3226. err = 0;
  3227. type = GETTYPE(op.type);
  3228. if (OP_IS_LOAD_STORE(type)) {
  3229. err = emulate_loadstore(regs, &op);
  3230. if (err)
  3231. return 0;
  3232. goto instr_done;
  3233. }
  3234. switch (type) {
  3235. case CACHEOP:
  3236. ea = truncate_if_32bit(regs->msr, op.ea);
  3237. if (!address_ok(regs, ea, 8))
  3238. return 0;
  3239. switch (op.type & CACHEOP_MASK) {
  3240. case DCBST:
  3241. __cacheop_user_asmx(ea, err, "dcbst");
  3242. break;
  3243. case DCBF:
  3244. __cacheop_user_asmx(ea, err, "dcbf");
  3245. break;
  3246. case DCBTST:
  3247. if (op.reg == 0)
  3248. prefetchw((void *) ea);
  3249. break;
  3250. case DCBT:
  3251. if (op.reg == 0)
  3252. prefetch((void *) ea);
  3253. break;
  3254. case ICBI:
  3255. __cacheop_user_asmx(ea, err, "icbi");
  3256. break;
  3257. case DCBZ:
  3258. err = emulate_dcbz(ea, regs);
  3259. break;
  3260. }
  3261. if (err) {
  3262. regs->dar = ea;
  3263. return 0;
  3264. }
  3265. goto instr_done;
  3266. case MFMSR:
  3267. regs->gpr[op.reg] = regs->msr & MSR_MASK;
  3268. goto instr_done;
  3269. case MTMSR:
  3270. val = regs->gpr[op.reg];
  3271. if ((val & MSR_RI) == 0)
  3272. /* can't step mtmsr[d] that would clear MSR_RI */
  3273. return -1;
  3274. /* here op.val is the mask of bits to change */
  3275. regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
  3276. goto instr_done;
  3277. case SYSCALL: /* sc */
  3278. /*
  3279. * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
  3280. * single step a system call instruction:
  3281. *
  3282. * Successful completion for an instruction means that the
  3283. * instruction caused no other interrupt. Thus a Trace
  3284. * interrupt never occurs for a System Call or System Call
  3285. * Vectored instruction, or for a Trap instruction that
  3286. * traps.
  3287. */
  3288. return -1;
  3289. case SYSCALL_VECTORED_0: /* scv 0 */
  3290. return -1;
  3291. case RFI:
  3292. return -1;
  3293. }
  3294. return 0;
  3295. instr_done:
  3296. regs_set_return_ip(regs,
  3297. truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
  3298. return 1;
  3299. }
  3300. NOKPROBE_SYMBOL(emulate_step);