/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<[email protected]>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <[email protected]>
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)
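
/*
 * Key schedule layout, matching struct cast6_ctx (u32 Km[12][4] followed by
 * u8 Kr[12][4]): km is the array of 48 32-bit masking keys (12 quad-rounds
 * of 4 rounds each); kr, at byte offset 12*4*4 = 192, holds the 48 5-bit
 * rotation keys, one byte each.
 */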
  21. /* s-boxes */
  22. #define s1 cast_s1
  23. #define s2 cast_s2
  24. #define s3 cast_s3
  25. #define s4 cast_s4
  26. /**********************************************************************
  27. 8-way AVX cast6
  28. **********************************************************************/

#define CTX %r15

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rdi
#define RID1d %edi
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d
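
/*
 * General-purpose register roles: RID1/RID2 hold zero-extended s-box indices;
 * RGI1..RGI4 receive the rotated words (two 32-bit words per 64-bit register)
 * extracted from the vector unit, chosen so their bl/bh byte subregisters can
 * address individual bytes; RFS1..RFS3 accumulate the 32-bit f-function
 * results. CTX lives in callee-saved %r15 because %rdi (RID1) is clobbered
 * by the lookups.
 */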

#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl	src ## bh,	RID1d; \
	movzbl	src ## bl,	RID2d; \
	shrq $16,	src; \
	movl	s1(, RID1, 4),	dst ## d; \
	op1	s2(, RID2, 4),	dst ## d; \
	movzbl	src ## bh,	RID1d; \
	movzbl	src ## bl,	RID2d; \
	interleave_op(il_reg); \
	op2	s3(, RID1, 4),	dst ## d; \
	op3	s4(, RID2, 4),	dst ## d;
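
/*
 * The four s-box lookups of the CAST f-function for one 32-bit word: the
 * word's bytes are consumed pairwise through the bh/bl subregisters of src
 * (with a 16-bit shift in between) to index s1..s4, and the table values are
 * folded into dst with the round-type-specific operations op1..op3.
 * interleave_op is shr_next when a second 32-bit word in the same register
 * must be exposed for the following lookup_32bit invocation, dummy otherwise.
 */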

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0	a, RKM, x; \
	vpslld	RKRF, x, RTMP; \
	vpsrld	RKRR, x, x; \
	vpor	RTMP, x, x; \
	\
	vmovq		x, gi1; \
	vpextrq $1,	x, gi2;
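
/*
 * F_head combines the masking key with the input (op0 is vpaddd, vpxor or
 * vpsubd depending on the round type) and rotates each dword left by the
 * 5-bit rotation key, computed as (x << r) | (x >> (32 - r)) with r in RKRF
 * and 32 - r in RKRR. The rotated vector is then split into two 64-bit GP
 * registers for the s-box lookups.
 */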

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32,	RFS2; \
	orq		RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32,	RFS1; \
	orq		RFS1, RFS3; \
	\
	vmovq		RFS2, x; \
	vpinsrq $1, RFS3, x, x;
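
/*
 * F_tail runs the s-box lookups for all four 32-bit words (two per GP
 * register), merges the four results back into one xmm register, and leaves
 * it in x for the caller.
 */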

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor	a1, RX, a1; \
	vpxor	a2, RTMP, a2;
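
/* F_2 evaluates one round function on both four-block groups, xoring f(b)
 * into a for each group. */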

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
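
/*
 * The three CAST-256 round function types (RFC 2612): f1 computes
 * ((S1 ^ S2) - S3) + S4 over I = (Km + x) <<< Kr, f2 computes
 * ((S1 - S2) + S3) ^ S4 over I = (Km ^ x) <<< Kr, and f3 computes
 * ((S1 + S2) ^ S3) - S4 over I = (Km - x) <<< Kr.
 */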

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM; \
	vpand		R1ST, RKR, RKRF; \
	vpsubq		RKRF, R32, RKRR; \
	vpsrldq $1,	RKR, RKR;
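
/*
 * get_round_keys broadcasts the 32-bit masking key for round nn into RKM,
 * masks the low 5 bits of the next queued rotation byte into RKRF
 * (R1ST = 0x1f), derives RKRR = 32 - r (R32 = 32) for the right-shift half
 * of the rotate, and advances the rotation-key byte queue in RKR by one.
 */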

#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1); \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2); \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3); \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);
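
/*
 * Q is one forward quad-round of CAST-256:
 * C ^= f1(D); B ^= f2(C); A ^= f3(B); D ^= f1(A).
 */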

#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1); \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3); \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2); \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);
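
/*
 * QBAR is the inverse quad-round: the same four operations in reverse order
 * (each step is a self-inverting xor, so only the order changes).
 */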

#define shuffle(mask) \
	vpshufb		mask, RKR, RKR;

#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX), RKR, RKR; \
	do_mask(mask);
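
/*
 * preload_rkr loads the 16 rotation-key bytes of quad-round group n into RKR.
 * XORing each 5-bit value with 16 adds 16 mod 32; this biases the rotation so
 * that the byte order in which lookup_32bit consumes the rotated word still
 * matches the S1..S4 index order. do_mask(mask) then reorders the queued
 * bytes (via the .Lrkr_* shuffle masks below) to match the order in which Q
 * and QBAR pull keys off the queue.
 */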

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;
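
/*
 * Standard 4x4 32-bit transpose built from dword/qword unpacks: four 128-bit
 * blocks become four vectors holding the A, B, C and D words of all four
 * blocks, so one vector instruction processes the same word of every block.
 */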

#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	vpshufb rmask, x2, x2; \
	vpshufb rmask, x3, x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	vpshufb rmask, x2, x2; \
	vpshufb rmask, x3, x3;
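
/*
 * inpack_blocks byte-swaps each dword (CAST6 operates on big-endian words)
 * and transposes into word-sliced form; outunpack_blocks reverses both steps.
 */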

.section	.rodata.cst16, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.L16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16

.section	.rodata.cst4.L32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0

.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0
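
/*
 * .L16_mask supplies the +16 rotation bias used by preload_rkr, .L32_mask the
 * constant 32 used to derive 32 - r, and .Lfirst_mask the 0x1f mask that
 * extracts one 5-bit rotation value.
 */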

.text

.align 8
SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
	/* input:
	 *	%rdi: ctx
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */
	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	RET;
SYM_FUNC_END(__cast6_enc_blk8)
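
/*
 * CAST-256 encryption is six forward quad-rounds followed by six inverse
 * quad-rounds. Decryption below runs the same structure with the key schedule
 * consumed in reverse, which is why the .Lrkr_dec_* masks reverse the
 * rotation-key byte order.
 */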

.align 8
SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
	/* input:
	 *	%rdi: ctx
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */
	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	RET;
SYM_FUNC_END(__cast6_dec_blk8)
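
/*
 * The entry points below are called from C glue code; load_8way, store_8way
 * and store_cbc_8way come from glue_helper-asm-avx.S, included above. The
 * assumed C-side prototypes follow the pattern (sketch):
 *
 *	asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst,
 *					   const u8 *src);
 */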

SYM_FUNC_START(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	RET;
SYM_FUNC_END(cast6_ecb_enc_8way)

SYM_FUNC_START(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	FRAME_END
	RET;
SYM_FUNC_END(cast6_ecb_dec_8way)

SYM_FUNC_START(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r15;
	popq %r12;
	FRAME_END
	RET;
SYM_FUNC_END(cast6_cbc_dec_8way)
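
/*
 * CBC decryption keeps the source pointer in callee-saved %r12: %rdx (RGI1)
 * is clobbered inside __cast6_dec_blk8, and store_cbc_8way must re-read the
 * previous ciphertext blocks to xor them into the decrypted output.
 */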