
// SPDX-License-Identifier: GPL-2.0
/* visemul.c: Emulation of VIS instructions.
 *
 * Copyright (C) 2006 David S. Miller ([email protected])
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/perf_event.h>

#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/fpumacro.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/* OPF field of various VIS instructions. */

/* 000111011 - four 16-bit packs */
#define FPACK16_OPF	0x03b

/* 000111010 - two 32-bit packs */
#define FPACK32_OPF	0x03a

/* 000111101 - two 16-bit packs from 32-bit fixed values */
#define FPACKFIX_OPF	0x03d

/* 001001101 - four 16-bit expands */
#define FEXPAND_OPF	0x04d

/* 001001011 - two 32-bit merges */
#define FPMERGE_OPF	0x04b

/* 000110001 - 8-by-16-bit partitioned product */
#define FMUL8x16_OPF	0x031

/* 000110011 - 8-by-16-bit upper alpha partitioned product */
#define FMUL8x16AU_OPF	0x033

/* 000110101 - 8-by-16-bit lower alpha partitioned product */
#define FMUL8x16AL_OPF	0x035

/* 000110110 - upper 8-by-16-bit partitioned product */
#define FMUL8SUx16_OPF	0x036

/* 000110111 - lower 8-by-16-bit partitioned product */
#define FMUL8ULx16_OPF	0x037

/* 000111000 - upper 8-by-16-bit partitioned product */
#define FMULD8SUx16_OPF	0x038

/* 000111001 - lower unsigned 8-by-16-bit partitioned product */
#define FMULD8ULx16_OPF	0x039

/* 000101000 - four 16-bit compare; set rd if src1 > src2 */
#define FCMPGT16_OPF	0x028

/* 000101100 - two 32-bit compare; set rd if src1 > src2 */
#define FCMPGT32_OPF	0x02c

/* 000100000 - four 16-bit compare; set rd if src1 <= src2 */
#define FCMPLE16_OPF	0x020

/* 000100100 - two 32-bit compare; set rd if src1 <= src2 */
#define FCMPLE32_OPF	0x024

/* 000100010 - four 16-bit compare; set rd if src1 != src2 */
#define FCMPNE16_OPF	0x022

/* 000100110 - two 32-bit compare; set rd if src1 != src2 */
#define FCMPNE32_OPF	0x026

/* 000101010 - four 16-bit compare; set rd if src1 == src2 */
#define FCMPEQ16_OPF	0x02a

/* 000101110 - two 32-bit compare; set rd if src1 == src2 */
#define FCMPEQ32_OPF	0x02e

/* 000000000 - Eight 8-bit edge boundary processing */
#define EDGE8_OPF	0x000

/* 000000001 - Eight 8-bit edge boundary processing, no CC */
#define EDGE8N_OPF	0x001

/* 000000010 - Eight 8-bit edge boundary processing, little-endian */
#define EDGE8L_OPF	0x002

/* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */
#define EDGE8LN_OPF	0x003

/* 000000100 - Four 16-bit edge boundary processing */
#define EDGE16_OPF	0x004

/* 000000101 - Four 16-bit edge boundary processing, no CC */
#define EDGE16N_OPF	0x005

/* 000000110 - Four 16-bit edge boundary processing, little-endian */
#define EDGE16L_OPF	0x006

/* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */
#define EDGE16LN_OPF	0x007

/* 000001000 - Two 32-bit edge boundary processing */
#define EDGE32_OPF	0x008

/* 000001001 - Two 32-bit edge boundary processing, no CC */
#define EDGE32N_OPF	0x009

/* 000001010 - Two 32-bit edge boundary processing, little-endian */
#define EDGE32L_OPF	0x00a

/* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */
#define EDGE32LN_OPF	0x00b

/* 000111110 - distance between 8 8-bit components */
#define PDIST_OPF	0x03e

/* 000010000 - convert 8-bit 3-D address to blocked byte address */
#define ARRAY8_OPF	0x010

/* 000010010 - convert 16-bit 3-D address to blocked byte address */
#define ARRAY16_OPF	0x012

/* 000010100 - convert 32-bit 3-D address to blocked byte address */
#define ARRAY32_OPF	0x014

/* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */
#define BMASK_OPF	0x019

/* 001001100 - Permute bytes as specified by GSR.MASK */
#define BSHUFFLE_OPF	0x04c

#define VIS_OPF_SHIFT	5
#define VIS_OPF_MASK	(0x1ff << VIS_OPF_SHIFT)

#define RS1(INSN)	(((INSN) >> 14) & 0x1f)
#define RS2(INSN)	(((INSN) >> 0) & 0x1f)
#define RD(INSN)	(((INSN) >> 25) & 0x1f)
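
/* Integer registers %l0-%l7 and %i0-%i7 (numbers 16-31) live in the
 * current register window.  Before they can be read or written through
 * the window image saved on the stack, the window must be flushed out
 * of the CPU.
 */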
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}
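
/* Read integer register 'reg'.  %g0-%g7 and %o0-%o7 come straight from
 * pt_regs; %l and %i registers are read from the register window saved
 * on the stack (a 32-bit frame for compat tasks, otherwise a 64-bit
 * frame at FP + STACK_BIAS).
 */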
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value, fp;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
							   struct pt_regs *regs)
{
	unsigned long fp = regs->u_regs[UREG_FP];

	BUG_ON(reg < 16);
	BUG_ON(regs->tstate & TSTATE_PRIV);

	if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		return (unsigned long __user *)&win32->locals[reg - 16];
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg,
						   struct pt_regs *regs)
{
	BUG_ON(reg >= 16);
	BUG_ON(regs->tstate & TSTATE_PRIV);

	return &regs->u_regs[reg];
}
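
/* Write 'val' to integer register 'rd'.  Registers below 16 are updated
 * directly in pt_regs; windowed registers are written into the user's
 * saved register window, truncated to 32 bits when the task runs on a
 * 32-bit stack frame.
 */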
static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd)
{
	if (rd < 16) {
		unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs);

		*rd_kern = val;
	} else {
		unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);

		if (!test_thread_64bit_stack(regs->u_regs[UREG_FP]))
			__put_user((u32)val, (u32 __user *)rd_user);
		else
			__put_user(val, rd_user);
	}
}
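
/* Access the saved floating-point registers.  In the 5-bit register
 * field, double registers %f32-%f62 are encoded with bit 0 set; that bit
 * is moved up to bit 5 to index the flat 32-bit regs[] array.
 */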
static inline unsigned long fpd_regval(struct fpustate *f,
				       unsigned int insn_regnum)
{
	insn_regnum = (((insn_regnum & 1) << 5) |
		       (insn_regnum & 0x1e));

	return *(unsigned long *) &f->regs[insn_regnum];
}

static inline unsigned long *fpd_regaddr(struct fpustate *f,
					 unsigned int insn_regnum)
{
	insn_regnum = (((insn_regnum & 1) << 5) |
		       (insn_regnum & 0x1e));

	return (unsigned long *) &f->regs[insn_regnum];
}

static inline unsigned int fps_regval(struct fpustate *f,
				      unsigned int insn_regnum)
{
	return f->regs[insn_regnum];
}

static inline unsigned int *fps_regaddr(struct fpustate *f,
					unsigned int insn_regnum)
{
	return &f->regs[insn_regnum];
}
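
/* Left and right partial-store masks for the EDGE instructions, indexed
 * by the low address bits of rs1 and rs2 respectively.  The *_l tables
 * are the little-endian variants.
 */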
struct edge_tab {
	u16 left, right;
};

static struct edge_tab edge8_tab[8] = {
	{ 0xff, 0x80 },
	{ 0x7f, 0xc0 },
	{ 0x3f, 0xe0 },
	{ 0x1f, 0xf0 },
	{ 0x0f, 0xf8 },
	{ 0x07, 0xfc },
	{ 0x03, 0xfe },
	{ 0x01, 0xff },
};

static struct edge_tab edge8_tab_l[8] = {
	{ 0xff, 0x01 },
	{ 0xfe, 0x03 },
	{ 0xfc, 0x07 },
	{ 0xf8, 0x0f },
	{ 0xf0, 0x1f },
	{ 0xe0, 0x3f },
	{ 0xc0, 0x7f },
	{ 0x80, 0xff },
};

static struct edge_tab edge16_tab[4] = {
	{ 0xf, 0x8 },
	{ 0x7, 0xc },
	{ 0x3, 0xe },
	{ 0x1, 0xf },
};

static struct edge_tab edge16_tab_l[4] = {
	{ 0xf, 0x1 },
	{ 0xe, 0x3 },
	{ 0xc, 0x7 },
	{ 0x8, 0xf },
};

static struct edge_tab edge32_tab[2] = {
	{ 0x3, 0x2 },
	{ 0x1, 0x3 },
};

static struct edge_tab edge32_tab_l[2] = {
	{ 0x3, 0x1 },
	{ 0x2, 0x3 },
};
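
/* EDGE instructions: compute a partial-store mask from the low bits of
 * the rs1 address.  If rs1 and rs2 fall within the same aligned 8-byte
 * block, the left and right masks are combined.  The variants without
 * the "N" suffix also set the integer condition codes from rs1 - rs2.
 */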
static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
	unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val;
	u16 left, right;

	maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
	orig_rs1 = rs1 = fetch_reg(RS1(insn), regs);
	orig_rs2 = rs2 = fetch_reg(RS2(insn), regs);

	if (test_thread_flag(TIF_32BIT)) {
		rs1 = rs1 & 0xffffffff;
		rs2 = rs2 & 0xffffffff;
	}
	switch (opf) {
	default:
	case EDGE8_OPF:
	case EDGE8N_OPF:
		left = edge8_tab[rs1 & 0x7].left;
		right = edge8_tab[rs2 & 0x7].right;
		break;

	case EDGE8L_OPF:
	case EDGE8LN_OPF:
		left = edge8_tab_l[rs1 & 0x7].left;
		right = edge8_tab_l[rs2 & 0x7].right;
		break;

	case EDGE16_OPF:
	case EDGE16N_OPF:
		left = edge16_tab[(rs1 >> 1) & 0x3].left;
		right = edge16_tab[(rs2 >> 1) & 0x3].right;
		break;

	case EDGE16L_OPF:
	case EDGE16LN_OPF:
		left = edge16_tab_l[(rs1 >> 1) & 0x3].left;
		right = edge16_tab_l[(rs2 >> 1) & 0x3].right;
		break;

	case EDGE32_OPF:
	case EDGE32N_OPF:
		left = edge32_tab[(rs1 >> 2) & 0x1].left;
		right = edge32_tab[(rs2 >> 2) & 0x1].right;
		break;

	case EDGE32L_OPF:
	case EDGE32LN_OPF:
		left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
		right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
		break;
	}

	if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
		rd_val = right & left;
	else
		rd_val = left;

	store_reg(regs, rd_val, RD(insn));

	switch (opf) {
	case EDGE8_OPF:
	case EDGE8L_OPF:
	case EDGE16_OPF:
	case EDGE16L_OPF:
	case EDGE32_OPF:
	case EDGE32L_OPF: {
		unsigned long ccr, tstate;

		__asm__ __volatile__("subcc %1, %2, %%g0\n\t"
				     "rd %%ccr, %0"
				     : "=r" (ccr)
				     : "r" (orig_rs1), "r" (orig_rs2)
				     : "cc");
		tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
		regs->tstate = tstate | (ccr << 32UL);
	}
	}
}
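
/* ARRAY8/16/32: convert the packed 3-D coordinates in rs1 into a blocked
 * byte address.  rs2 (capped at 5) controls how many of the upper X and Y
 * bits are included; ARRAY16 and ARRAY32 scale the result for 2- and
 * 4-byte elements.
 */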
static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
	unsigned long rs1, rs2, rd_val;
	unsigned int bits, bits_mask;

	maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
	rs1 = fetch_reg(RS1(insn), regs);
	rs2 = fetch_reg(RS2(insn), regs);

	bits = (rs2 > 5 ? 5 : rs2);
	bits_mask = (1UL << bits) - 1UL;

	rd_val = ((((rs1 >> 11) & 0x3) << 0) |
		  (((rs1 >> 33) & 0x3) << 2) |
		  (((rs1 >> 55) & 0x1) << 4) |
		  (((rs1 >> 13) & 0xf) << 5) |
		  (((rs1 >> 35) & 0xf) << 9) |
		  (((rs1 >> 56) & 0xf) << 13) |
		  (((rs1 >> 17) & bits_mask) << 17) |
		  (((rs1 >> 39) & bits_mask) << (17 + bits)) |
		  (((rs1 >> 60) & 0xf) << (17 + (2*bits))));

	switch (opf) {
	case ARRAY16_OPF:
		rd_val <<= 1;
		break;

	case ARRAY32_OPF:
		rd_val <<= 2;
	}

	store_reg(regs, rd_val, RD(insn));
}
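
/* BMASK: rd = rs1 + rs2, with the low 32 bits of the sum also written to
 * the GSR mask field (the upper half of the saved GSR) for use by a
 * subsequent BSHUFFLE.
 */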
static void bmask(struct pt_regs *regs, unsigned int insn)
{
	unsigned long rs1, rs2, rd_val, gsr;

	maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
	rs1 = fetch_reg(RS1(insn), regs);
	rs2 = fetch_reg(RS2(insn), regs);
	rd_val = rs1 + rs2;

	store_reg(regs, rd_val, RD(insn));

	gsr = current_thread_info()->gsr[0] & 0xffffffff;
	gsr |= rd_val << 32UL;
	current_thread_info()->gsr[0] = gsr;
}
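
/* BSHUFFLE: each 4-bit field of GSR.mask selects one byte from the
 * concatenation of rs1 and rs2 (selector values 0-7 pick from rs1,
 * 8-15 from rs2) to form the corresponding byte of rd.
 */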
static void bshuffle(struct pt_regs *regs, unsigned int insn)
{
	struct fpustate *f = FPUSTATE;
	unsigned long rs1, rs2, rd_val;
	unsigned long bmask, i;

	bmask = current_thread_info()->gsr[0] >> 32UL;

	rs1 = fpd_regval(f, RS1(insn));
	rs2 = fpd_regval(f, RS2(insn));

	rd_val = 0UL;
	for (i = 0; i < 8; i++) {
		unsigned long which = (bmask >> (i * 4)) & 0xf;
		unsigned long byte;

		if (which < 8)
			byte = (rs1 >> (which * 8)) & 0xff;
		else
			byte = (rs2 >> ((which-8)*8)) & 0xff;
		rd_val |= (byte << (i * 8));
	}
	*fpd_regaddr(f, RD(insn)) = rd_val;
}
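
/* PDIST: sum of absolute differences of the eight 8-bit components of
 * rs1 and rs2, accumulated into the existing value of rd.
 */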
static void pdist(struct pt_regs *regs, unsigned int insn)
{
	struct fpustate *f = FPUSTATE;
	unsigned long rs1, rs2, *rd, rd_val;
	unsigned long i;

	rs1 = fpd_regval(f, RS1(insn));
	rs2 = fpd_regval(f, RS2(insn));
	rd = fpd_regaddr(f, RD(insn));

	rd_val = *rd;

	for (i = 0; i < 8; i++) {
		s16 s1, s2;

		s1 = (rs1 >> (56 - (i * 8))) & 0xff;
		s2 = (rs2 >> (56 - (i * 8))) & 0xff;

		/* Absolute value of difference. */
		s1 -= s2;
		if (s1 < 0)
			s1 = ~s1 + 1;

		rd_val += s1;
	}

	*rd = rd_val;
}
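
/* Pixel formatting: FPACK16, FPACK32 and FPACKFIX shift their fixed-point
 * inputs left by GSR.scale_factor and clamp the truncated results into
 * 8-bit or 16-bit fields; FEXPAND widens each byte into a 16-bit fixed
 * value; FPMERGE interleaves the bytes of rs1 and rs2.
 */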
static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
	struct fpustate *f = FPUSTATE;
	unsigned long rs1, rs2, gsr, scale, rd_val;

	gsr = current_thread_info()->gsr[0];
	scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f);
	switch (opf) {
	case FPACK16_OPF: {
		unsigned long byte;

		rs2 = fpd_regval(f, RS2(insn));
		rd_val = 0;
		for (byte = 0; byte < 4; byte++) {
			unsigned int val;
			s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL;
			int scaled = src << scale;
			int from_fixed = scaled >> 7;

			val = ((from_fixed < 0) ?
			       0 :
			       (from_fixed > 255) ?
			       255 : from_fixed);

			rd_val |= (val << (8 * byte));
		}
		*fps_regaddr(f, RD(insn)) = rd_val;
		break;
	}

	case FPACK32_OPF: {
		unsigned long word;

		rs1 = fpd_regval(f, RS1(insn));
		rs2 = fpd_regval(f, RS2(insn));
		rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL);
		for (word = 0; word < 2; word++) {
			unsigned long val;
			s32 src = (rs2 >> (word * 32UL));
			s64 scaled = src << scale;
			s64 from_fixed = scaled >> 23;

			val = ((from_fixed < 0) ?
			       0 :
			       (from_fixed > 255) ?
			       255 : from_fixed);

			rd_val |= (val << (32 * word));
		}
		*fpd_regaddr(f, RD(insn)) = rd_val;
		break;
	}

	case FPACKFIX_OPF: {
		unsigned long word;

		rs2 = fpd_regval(f, RS2(insn));
		rd_val = 0;
		for (word = 0; word < 2; word++) {
			long val;
			s32 src = (rs2 >> (word * 32UL));
			s64 scaled = src << scale;
			s64 from_fixed = scaled >> 16;

			val = ((from_fixed < -32768) ?
			       -32768 :
			       (from_fixed > 32767) ?
			       32767 : from_fixed);

			rd_val |= ((val & 0xffff) << (word * 16));
		}
		*fps_regaddr(f, RD(insn)) = rd_val;
		break;
	}

	case FEXPAND_OPF: {
		unsigned long byte;

		rs2 = fps_regval(f, RS2(insn));
		rd_val = 0;
		for (byte = 0; byte < 4; byte++) {
			unsigned long val;
			u8 src = (rs2 >> (byte * 8)) & 0xff;

			val = src << 4;

			rd_val |= (val << (byte * 16));
		}
		*fpd_regaddr(f, RD(insn)) = rd_val;
		break;
	}

	case FPMERGE_OPF: {
		rs1 = fps_regval(f, RS1(insn));
		rs2 = fps_regval(f, RS2(insn));

		rd_val = (((rs2 & 0x000000ff) << 0) |
			  ((rs1 & 0x000000ff) << 8) |
			  ((rs2 & 0x0000ff00) << 8) |
			  ((rs1 & 0x0000ff00) << 16) |
			  ((rs2 & 0x00ff0000) << 16) |
			  ((rs1 & 0x00ff0000) << 24) |
			  ((rs2 & 0xff000000) << 24) |
			  ((rs1 & 0xff000000) << 32));

		*fpd_regaddr(f, RD(insn)) = rd_val;
		break;
	}
	}
}
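
/* Partitioned multiplies: multiply unsigned 8-bit components by signed
 * 16-bit components, keep bits [23:8] of each product, and round up when
 * bit 7 of the product is set.
 */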
static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
	struct fpustate *f = FPUSTATE;
	unsigned long rs1, rs2, rd_val;

	switch (opf) {
	case FMUL8x16_OPF: {
		unsigned long byte;

		rs1 = fps_regval(f, RS1(insn));
		rs2 = fpd_regval(f, RS2(insn));

		rd_val = 0;
		for (byte = 0; byte < 4; byte++) {
			u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
			s16 src2 = (rs2 >> (byte * 16)) & 0xffff;
			u32 prod = src1 * src2;
			u16 scaled = ((prod & 0x00ffff00) >> 8);

			/* Round up. */
			if (prod & 0x80)
				scaled++;
			rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
		}

		*fpd_regaddr(f, RD(insn)) = rd_val;
		break;
	}

	case FMUL8x16AU_OPF:
	case FMUL8x16AL_OPF: {
		unsigned long byte;
		s16 src2;

		rs1 = fps_regval(f, RS1(insn));
		rs2 = fps_regval(f, RS2(insn));

		rd_val = 0;
		src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0);
		for (byte = 0; byte < 4; byte++) {
			u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
			u32 prod = src1 * src2;
			u16 scaled = ((prod & 0x00ffff00) >> 8);

			/* Round up. */
			if (prod & 0x80)
				scaled++;
			rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
		}

		*fpd_regaddr(f, RD(insn)) = rd_val;
		break;
	}

	case FMUL8SUx16_OPF:
	case FMUL8ULx16_OPF: {
		unsigned long byte, ushift;

		rs1 = fpd_regval(f, RS1(insn));
		rs2 = fpd_regval(f, RS2(insn));

		rd_val = 0;
		ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0;
		for (byte = 0; byte < 4; byte++) {
			u16 src1;
			s16 src2;
			u32 prod;
			u16 scaled;

			src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
			src2 = ((rs2 >> (16 * byte)) & 0xffff);
			prod = src1 * src2;
			scaled = ((prod & 0x00ffff00) >> 8);

			/* Round up. */
			if (prod & 0x80)
				scaled++;
			rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
		}

		*fpd_regaddr(f, RD(insn)) = rd_val;
		break;
	}

	case FMULD8SUx16_OPF:
	case FMULD8ULx16_OPF: {
		unsigned long byte, ushift;

		rs1 = fps_regval(f, RS1(insn));
		rs2 = fps_regval(f, RS2(insn));

		rd_val = 0;
		ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0;
		for (byte = 0; byte < 2; byte++) {
			u16 src1;
			s16 src2;
			u32 prod;
			u16 scaled;

			src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
			src2 = ((rs2 >> (16 * byte)) & 0xffff);
			prod = src1 * src2;
			scaled = ((prod & 0x00ffff00) >> 8);

			/* Round up. */
			if (prod & 0x80)
				scaled++;
			rd_val |= ((scaled & 0xffffUL) <<
				   ((byte * 32UL) + 7UL));
		}

		*fpd_regaddr(f, RD(insn)) = rd_val;
		break;
	}
	}
}
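
/* Partitioned compares: compare the 16-bit or 32-bit fields of rs1 and
 * rs2 element-wise and write the per-field results as a small bitmask
 * into integer register rd.
 */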
static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
	struct fpustate *f = FPUSTATE;
	unsigned long rs1, rs2, rd_val, i;

	rs1 = fpd_regval(f, RS1(insn));
	rs2 = fpd_regval(f, RS2(insn));

	rd_val = 0;

	switch (opf) {
	case FCMPGT16_OPF:
		for (i = 0; i < 4; i++) {
			s16 a = (rs1 >> (i * 16)) & 0xffff;
			s16 b = (rs2 >> (i * 16)) & 0xffff;

			if (a > b)
				rd_val |= 8 >> i;
		}
		break;

	case FCMPGT32_OPF:
		for (i = 0; i < 2; i++) {
			s32 a = (rs1 >> (i * 32)) & 0xffffffff;
			s32 b = (rs2 >> (i * 32)) & 0xffffffff;

			if (a > b)
				rd_val |= 2 >> i;
		}
		break;

	case FCMPLE16_OPF:
		for (i = 0; i < 4; i++) {
			s16 a = (rs1 >> (i * 16)) & 0xffff;
			s16 b = (rs2 >> (i * 16)) & 0xffff;

			if (a <= b)
				rd_val |= 8 >> i;
		}
		break;

	case FCMPLE32_OPF:
		for (i = 0; i < 2; i++) {
			s32 a = (rs1 >> (i * 32)) & 0xffffffff;
			s32 b = (rs2 >> (i * 32)) & 0xffffffff;

			if (a <= b)
				rd_val |= 2 >> i;
		}
		break;

	case FCMPNE16_OPF:
		for (i = 0; i < 4; i++) {
			s16 a = (rs1 >> (i * 16)) & 0xffff;
			s16 b = (rs2 >> (i * 16)) & 0xffff;

			if (a != b)
				rd_val |= 8 >> i;
		}
		break;

	case FCMPNE32_OPF:
		for (i = 0; i < 2; i++) {
			s32 a = (rs1 >> (i * 32)) & 0xffffffff;
			s32 b = (rs2 >> (i * 32)) & 0xffffffff;

			if (a != b)
				rd_val |= 2 >> i;
		}
		break;

	case FCMPEQ16_OPF:
		for (i = 0; i < 4; i++) {
			s16 a = (rs1 >> (i * 16)) & 0xffff;
			s16 b = (rs2 >> (i * 16)) & 0xffff;

			if (a == b)
				rd_val |= 8 >> i;
		}
		break;

	case FCMPEQ32_OPF:
		for (i = 0; i < 2; i++) {
			s32 a = (rs1 >> (i * 32)) & 0xffffffff;
			s32 b = (rs2 >> (i * 32)) & 0xffffffff;

			if (a == b)
				rd_val |= 2 >> i;
		}
		break;
	}

	maybe_flush_windows(0, 0, RD(insn), 0);
	store_reg(regs, rd_val, RD(insn));
}

/* Emulate the VIS instructions which are not implemented in
 * hardware on Niagara.
 */
int vis_emul(struct pt_regs *regs, unsigned int insn)
{
	unsigned long pc = regs->tpc;
	unsigned int opf;

	BUG_ON(regs->tstate & TSTATE_PRIV);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;

	if (get_user(insn, (u32 __user *) pc))
		return -EFAULT;

	save_and_clear_fpu();

	opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
	switch (opf) {
	default:
		return -EINVAL;

	/* Pixel Formatting Instructions. */
	case FPACK16_OPF:
	case FPACK32_OPF:
	case FPACKFIX_OPF:
	case FEXPAND_OPF:
	case FPMERGE_OPF:
		pformat(regs, insn, opf);
		break;

	/* Partitioned Multiply Instructions */
	case FMUL8x16_OPF:
	case FMUL8x16AU_OPF:
	case FMUL8x16AL_OPF:
	case FMUL8SUx16_OPF:
	case FMUL8ULx16_OPF:
	case FMULD8SUx16_OPF:
	case FMULD8ULx16_OPF:
		pmul(regs, insn, opf);
		break;

	/* Pixel Compare Instructions */
	case FCMPGT16_OPF:
	case FCMPGT32_OPF:
	case FCMPLE16_OPF:
	case FCMPLE32_OPF:
	case FCMPNE16_OPF:
	case FCMPNE32_OPF:
	case FCMPEQ16_OPF:
	case FCMPEQ32_OPF:
		pcmp(regs, insn, opf);
		break;

	/* Edge Handling Instructions */
	case EDGE8_OPF:
	case EDGE8N_OPF:
	case EDGE8L_OPF:
	case EDGE8LN_OPF:
	case EDGE16_OPF:
	case EDGE16N_OPF:
	case EDGE16L_OPF:
	case EDGE16LN_OPF:
	case EDGE32_OPF:
	case EDGE32N_OPF:
	case EDGE32L_OPF:
	case EDGE32LN_OPF:
		edge(regs, insn, opf);
		break;

	/* Pixel Component Distance */
	case PDIST_OPF:
		pdist(regs, insn);
		break;

	/* Three-Dimensional Array Addressing Instructions */
	case ARRAY8_OPF:
	case ARRAY16_OPF:
	case ARRAY32_OPF:
		array(regs, insn, opf);
		break;

	/* Byte Mask and Shuffle Instructions */
	case BMASK_OPF:
		bmask(regs, insn);
		break;

	case BSHUFFLE_OPF:
		bshuffle(regs, insn);
		break;
	}

	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	return 0;
}