/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are supported by recent binutils (>= 2.26) only.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <[email protected]>
 */

#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H

#ifdef __ASSEMBLY__

/* Macros to generate vector instruction byte code */

/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd:	Operand to store register number
 * @gr:		Register designation string in the format "%rN"
 */
.macro	GR_NUM	opd gr
	\opd = 255
	.ifc	\gr,%r0
	\opd = 0
	.endif
	.ifc	\gr,%r1
	\opd = 1
	.endif
	.ifc	\gr,%r2
	\opd = 2
	.endif
	.ifc	\gr,%r3
	\opd = 3
	.endif
	.ifc	\gr,%r4
	\opd = 4
	.endif
	.ifc	\gr,%r5
	\opd = 5
	.endif
	.ifc	\gr,%r6
	\opd = 6
	.endif
	.ifc	\gr,%r7
	\opd = 7
	.endif
	.ifc	\gr,%r8
	\opd = 8
	.endif
	.ifc	\gr,%r9
	\opd = 9
	.endif
	.ifc	\gr,%r10
	\opd = 10
	.endif
	.ifc	\gr,%r11
	\opd = 11
	.endif
	.ifc	\gr,%r12
	\opd = 12
	.endif
	.ifc	\gr,%r13
	\opd = 13
	.endif
	.ifc	\gr,%r14
	\opd = 14
	.endif
	.ifc	\gr,%r15
	\opd = 15
	.endif
	.if \opd == 255
	\opd = \gr
	.endif
.endm
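
/*
 * Usage sketch (illustrative, not part of the original macro set):
 * "GR_NUM b2, %r11" sets b2 = 11.  An argument that matches none of
 * the %rN strings falls through to the final .if, which uses the
 * argument itself as the number, so "GR_NUM b2, 11" has the same
 * effect.
 */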

/* VX_NUM - Retrieve vector register number
 *
 * @opd:	Operand to store register number
 * @vxr:	Register designation string in the format "%vN"
 *
 * The vector register number is used as input to the instruction
 * and to compute the RXB field of the instruction.
 */
.macro	VX_NUM	opd vxr
	\opd = 255
	.ifc	\vxr,%v0
	\opd = 0
	.endif
	.ifc	\vxr,%v1
	\opd = 1
	.endif
	.ifc	\vxr,%v2
	\opd = 2
	.endif
	.ifc	\vxr,%v3
	\opd = 3
	.endif
	.ifc	\vxr,%v4
	\opd = 4
	.endif
	.ifc	\vxr,%v5
	\opd = 5
	.endif
	.ifc	\vxr,%v6
	\opd = 6
	.endif
	.ifc	\vxr,%v7
	\opd = 7
	.endif
	.ifc	\vxr,%v8
	\opd = 8
	.endif
	.ifc	\vxr,%v9
	\opd = 9
	.endif
	.ifc	\vxr,%v10
	\opd = 10
	.endif
	.ifc	\vxr,%v11
	\opd = 11
	.endif
	.ifc	\vxr,%v12
	\opd = 12
	.endif
	.ifc	\vxr,%v13
	\opd = 13
	.endif
	.ifc	\vxr,%v14
	\opd = 14
	.endif
	.ifc	\vxr,%v15
	\opd = 15
	.endif
	.ifc	\vxr,%v16
	\opd = 16
	.endif
	.ifc	\vxr,%v17
	\opd = 17
	.endif
	.ifc	\vxr,%v18
	\opd = 18
	.endif
	.ifc	\vxr,%v19
	\opd = 19
	.endif
	.ifc	\vxr,%v20
	\opd = 20
	.endif
	.ifc	\vxr,%v21
	\opd = 21
	.endif
	.ifc	\vxr,%v22
	\opd = 22
	.endif
	.ifc	\vxr,%v23
	\opd = 23
	.endif
	.ifc	\vxr,%v24
	\opd = 24
	.endif
	.ifc	\vxr,%v25
	\opd = 25
	.endif
	.ifc	\vxr,%v26
	\opd = 26
	.endif
	.ifc	\vxr,%v27
	\opd = 27
	.endif
	.ifc	\vxr,%v28
	\opd = 28
	.endif
	.ifc	\vxr,%v29
	\opd = 29
	.endif
	.ifc	\vxr,%v30
	\opd = 30
	.endif
	.ifc	\vxr,%v31
	\opd = 31
	.endif
	.if \opd == 255
	\opd = \vxr
	.endif
.endm
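
/*
 * Usage sketch (illustrative): "VX_NUM v1, %v31" sets v1 = 31.  Only
 * the low four bits of the number fit into the instruction's register
 * field; the fifth bit is carried separately in the RXB field (see
 * the RXB macro below).
 */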

/* RXB - Compute the RXB field of an instruction
 *
 * @rxb:	Operand to store computed RXB value
 * @v1:	First vector register designated operand
 * @v2:	Second vector register designated operand (optional)
 * @v3:	Third vector register designated operand (optional)
 * @v4:	Fourth vector register designated operand (optional)
 *
 * The RXB field collects the most significant bit of each vector
 * register number, extending the 4-bit register fields of the
 * instruction so that all 32 vector registers can be addressed.
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	\rxb = 0
	.if \v1 & 0x10
	\rxb = \rxb | 0x08
	.endif
	.if \v2 & 0x10
	\rxb = \rxb | 0x04
	.endif
	.if \v3 & 0x10
	\rxb = \rxb | 0x02
	.endif
	.if \v4 & 0x10
	\rxb = \rxb | 0x01
	.endif
.endm
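
/*
 * Worked example (illustrative): for the operands %v31 and %v16,
 * v1 = 31 and v2 = 16 both have bit 0x10 set, so rxb ends up as
 * 0x08 | 0x04 = 0x0C, marking the first two register fields as
 * designating registers 16-31.
 */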

/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:	Element size control
 * @v1:	First vector register designated operand (for RXB)
 * @v2:	Second vector register designated operand (for RXB)
 * @v3:	Third vector register designated operand (for RXB)
 * @v4:	Fourth vector register designated operand (for RXB)
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	rxb = 0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm

/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:	Element size control
 * @opc:	Opcode
 * @v1:	First vector register designated operand (for RXB)
 * @v2:	Second vector register designated operand (for RXB)
 * @v3:	Third vector register designated operand (for RXB)
 * @v4:	Fourth vector register designated operand (for RXB)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm
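
/*
 * Encoding sketch (illustrative): every instruction emitted below is
 * six bytes long - a leading 0xE7xx halfword carrying up to two 4-bit
 * register fields, a second halfword with further operand fields, and
 * the MRXBOPC halfword holding the element size control, the RXB
 * bits, and the final opcode byte.  For example, "VLR %v31, %v16"
 * (defined below) assembles to 0xE7 0xF0 0x00 0x00 0x0C 0x56.
 */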

/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr
	.word	(0xE700 | ((v1&15) << 4))
	.word	\imm2
	MRXBOPC	0, 0x44, v1
.endm
.macro	VZERO	vxr
	VGBM	\vxr, 0
.endm
.macro	VONE	vxr
	VGBM	\vxr, 0xFFFF
.endm
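
/*
 * Usage sketch (illustrative): "VZERO %v0" emits
 * 0xE7 0x00 0x00 0x00 0x00 0x44, and "VONE %v16" emits
 * 0xE7 0x00 0xFF 0xFF 0x08 0x44, where the 0x08 is the RXB bit for
 * register 16.
 */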

/* VECTOR LOAD VR ELEMENT FROM GR */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
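
/*
 * Usage sketch (illustrative): "VLVGG %v0, %r3, 1" loads general
 * register %r3 into doubleword element 1 of %v0 and assembles to
 * 0xE7 0x03 0x00 0x01 0x30 0x22.
 */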

/* VECTOR LOAD REGISTER */
.macro	VLR	v1, v2
	VX_NUM	v1, \v1
	VX_NUM	v2, \v2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0
	MRXBOPC	0, 0x56, v1, v2
.endm

/* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x06, v1
.endm

/* VECTOR LOAD ELEMENT */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm

/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm

/* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3
.endm
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm

/* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x36, v1, v3
.endm

/* VECTOR STORE */
.macro	VST	vr1, disp, index="%r0", base
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (x2&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x0E, v1
.endm

/* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x3E, v1, v3
.endm
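
/*
 * Usage sketch (illustrative): saving and restoring eight vector
 * registers through a 128-byte buffer addressed by %r1:
 *
 *	VSTM	%v16, %v23, 0, %r1	# store %v16..%v23
 *	...
 *	VLM	%v16, %v23, 0, %r1	# reload %v16..%v23
 */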

/* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
.endm
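
/*
 * Semantics sketch (illustrative): "VPERM %v0, %v1, %v2, %v3" fills
 * each byte of %v0 with one byte picked from the 32-byte
 * concatenation of %v1 and %v2; the rightmost five bits of the
 * corresponding byte of %v3 supply the index.
 */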

/* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm

/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
.macro	VPDI	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x84, v1, v2, v3
.endm

/* VECTOR REPLICATE */
.macro	VREP	vr1, vr3, imm2, m4
	VX_NUM	v1, \vr1
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	\imm2
	MRXBOPC	\m4, 0x4D, v1, v3
.endm
.macro	VREPB	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 0
.endm
.macro	VREPH	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 1
.endm
.macro	VREPF	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 2
.endm
.macro	VREPG	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 3
.endm

/* VECTOR MERGE HIGH */
.macro	VMRH	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x61, v1, v2, v3
.endm
.macro	VMRHB	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 0
.endm
.macro	VMRHH	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 1
.endm
.macro	VMRHF	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 2
.endm
.macro	VMRHG	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 3
.endm

/* VECTOR MERGE LOW */
.macro	VMRL	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x60, v1, v2, v3
.endm
.macro	VMRLB	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 0
.endm
.macro	VMRLH	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 1
.endm
.macro	VMRLF	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 2
.endm
.macro	VMRLG	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 3
.endm

/* Vector integer instructions */

/* VECTOR AND */
.macro	VN	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x68, v1, v2, v3
.endm

/* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm

/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm
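
/*
 * Semantics sketch (illustrative): VGFMG multiplies the corresponding
 * doubleword elements of the two source operands carry-lessly (as
 * polynomials over GF(2)) and XORs each even-odd pair of double-width
 * products into the destination - the building block of carry-less
 * multiply-based algorithms such as CRC computation.
 */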

/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\m5 << 8)
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm

/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm

/* VECTOR REPLICATE IMMEDIATE */
.macro	VREPI	vr1, imm2, m3
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, 0x45, v1
.endm
.macro	VREPIB	vr1, imm2
	VREPI	\vr1, \imm2, 0
.endm
.macro	VREPIH	vr1, imm2
	VREPI	\vr1, \imm2, 1
.endm
.macro	VREPIF	vr1, imm2
	VREPI	\vr1, \imm2, 2
.endm
.macro	VREPIG	vr1, imm2
	VREPI	\vr1, \imm2, 3
.endm

/* VECTOR ADD */
.macro	VA	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xF3, v1, v2, v3
.endm
.macro	VAB	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 0
.endm
.macro	VAH	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 1
.endm
.macro	VAF	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 2
.endm
.macro	VAG	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 3
.endm
.macro	VAQ	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 4
.endm
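
/*
 * Usage sketch (illustrative): "VAQ %v0, %v1, %v2" adds %v1 and %v2
 * as single 128-bit integers and assembles to
 * 0xE7 0x01 0x20 0x00 0x40 0xF3.
 */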

/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
.macro	VESRAV	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x7A, v1, v2, v3
.endm
.macro	VESRAVB	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 0
.endm
.macro	VESRAVH	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 1
.endm
.macro	VESRAVF	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 2
.endm
.macro	VESRAVG	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 3
.endm

/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
.macro	VERLL	vr1, vr3, disp, base="%r0", m4
	VX_NUM	v1, \vr1
	VX_NUM	v3, \vr3
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m4, 0x33, v1, v3
.endm
.macro	VERLLB	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 0
.endm
.macro	VERLLH	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 1
.endm
.macro	VERLLF	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 2
.endm
.macro	VERLLG	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 3
.endm

/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
.macro	VSLDB	vr1, vr2, vr3, imm4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\imm4)
	MRXBOPC	0, 0x77, v1, v2, v3
.endm
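
/*
 * Usage sketch (illustrative): "VSLDB %v0, %v1, %v2, 4" places into
 * %v0 the 16 bytes starting at byte offset 4 of the 32-byte
 * concatenation of %v1 and %v2.
 */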

#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_S390_VX_INSN_H */