poly1305-armv8.pl

#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
#
# ====================================================================
# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL
# project.
# ====================================================================
#
# This module implements Poly1305 hash for ARMv8.
#
# June 2015
#
# Numbers are cycles per processed byte with poly1305_blocks alone.
#
#              IALU/gcc-4.9    NEON
#
# Apple A7     1.86/+5%        0.72
# Cortex-A53   2.69/+58%       1.47
# Cortex-A57   2.70/+7%        1.14
# Denver       1.64/+50%       1.18(*)
# X-Gene       2.13/+68%       2.27
# Mongoose     1.77/+75%       1.12
# Kryo         2.70/+55%       1.13
# ThunderX2    1.17/+95%       1.36
#
# (*) the estimate based on resource availability is less than 1.0,
# i.e. the measured result is worse than expected, presumably because
# the binary translator is not almighty;
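#
# Orientation note (added; the algorithm itself is standard Poly1305):
# the message is absorbed 16 bytes at a time as
# h = (h + m_i) * r mod 2^130 - 5, and the tag is (h + s) mod 2^128,
# where r is the clamped first half of the 32-byte key and s is the
# second half (passed here as the "nonce" argument of poly1305_emit).
# The scalar code below keeps h and r as 64-bit limbs (base 2^64); the
# NEON code keeps five 26-bit limbs per value (base 2^26) and records
# which form the stored hash is in via the is_base2_26 flag in the
# context.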
$flavour=shift;
$output=shift;
if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";
    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
my ($mac,$nonce)=($inp,$len);
my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));
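# Context layout assumed below (byte offsets from $ctx, inferred from the
# loads and stores in this file):
#   0..23   hash: h0..h2 as three 64-bit limbs, or five 32-bit base-2^26
#           limbs when the NEON code last stored it
#   24      is_base2_26 flag
#   32..47  clamped key r0,r1 (base 2^64)
#   48...   base-2^26 table of key powers for the NEON path; its first
#           32-bit word is set to -1 by poly1305_init until the table
#           has actually been computed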
$code.=<<___;
#ifndef __KERNEL__
# include "arm_arch.h"
.extern OPENSSL_armcap_P
#endif
.text
// forward "declarations" are required for Apple
.globl poly1305_blocks
.globl poly1305_emit
.globl poly1305_init
.type poly1305_init,%function
.align 5
poly1305_init:
cmp $inp,xzr
stp xzr,xzr,[$ctx] // zero hash value
stp xzr,xzr,[$ctx,#16] // [along with is_base2_26]
csel x0,xzr,x0,eq
b.eq .Lno_key
#ifndef __KERNEL__
adrp x17,OPENSSL_armcap_P
ldr w17,[x17,#:lo12:OPENSSL_armcap_P]
#endif
ldp $r0,$r1,[$inp] // load key
mov $s1,#0xfffffffc0fffffff
movk $s1,#0x0fff,lsl#48
#ifdef __AARCH64EB__
rev $r0,$r0 // flip bytes
rev $r1,$r1
#endif
and $r0,$r0,$s1 // &=0ffffffc0fffffff
and $s1,$s1,#-4
and $r1,$r1,$s1 // &=0ffffffc0ffffffc
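// Note (added): the two ANDs above implement the standard Poly1305 clamp,
// r &= 0x0ffffffc_0ffffffc_0ffffffc_0fffffff: the top 4 bits of every
// 32-bit word of r and the low 2 bits of its upper three words are
// cleared, which is what later makes s1 = r1 + (r1>>2) = 5*r1/4 exact.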
mov w#$s1,#-1
stp $r0,$r1,[$ctx,#32] // save key value
str w#$s1,[$ctx,#48] // impossible key power value
#ifndef __KERNEL__
tst w17,#ARMV7_NEON
adr $d0,.Lpoly1305_blocks
adr $r0,.Lpoly1305_blocks_neon
adr $d1,.Lpoly1305_emit
csel $d0,$d0,$r0,eq
# ifdef __ILP32__
stp w#$d0,w#$d1,[$len]
# else
stp $d0,$d1,[$len]
# endif
#endif
mov x0,#1
.Lno_key:
ret
.size poly1305_init,.-poly1305_init
.type poly1305_blocks,%function
.align 5
poly1305_blocks:
.Lpoly1305_blocks:
ands $len,$len,#-16
b.eq .Lno_data
ldp $h0,$h1,[$ctx] // load hash value
ldp $h2,x17,[$ctx,#16] // [along with is_base2_26]
ldp $r0,$r1,[$ctx,#32] // load key value
#ifdef __AARCH64EB__
lsr $d0,$h0,#32
mov w#$d1,w#$h0
lsr $d2,$h1,#32
mov w15,w#$h1
lsr x16,$h2,#32
#else
mov w#$d0,w#$h0
lsr $d1,$h0,#32
mov w#$d2,w#$h1
lsr x15,$h1,#32
mov w16,w#$h2
#endif
add $d0,$d0,$d1,lsl#26 // base 2^26 -> base 2^64
lsr $d1,$d2,#12
adds $d0,$d0,$d2,lsl#52
add $d1,$d1,x15,lsl#14
adc $d1,$d1,xzr
lsr $d2,x16,#24
adds $d1,$d1,x16,lsl#40
adc $d2,$d2,xzr
cmp x17,#0 // is_base2_26?
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
csel $h0,$h0,$d0,eq // choose between radixes
csel $h1,$h1,$d1,eq
csel $h2,$h2,$d2,eq
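// Notes (added): the sequence above re-packs a base-2^26 hash with digits
// g0..g4 (value = g0 + g1*2^26 + g2*2^52 + g3*2^78 + g4*2^104) into the
// 64-bit limbs d0,d1,d2, and is_base2_26 decides whether that converted
// value or the stored base-2^64 value is used.  Each .Loop iteration then
// computes h = (h + m)*r mod 2^130-5: the products that would land at
// weight 2^128 (h1*r1, h2*r1) are folded down through s1 = r1 + (r1>>2)
// = 5*r1/4 (exact thanks to key clamping), using 2^130 == 5 mod p, and
// the trailing and/add/lsr#2 steps fold bits 130 and up of the result
// back in as 5*(d2>>2).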
.Loop:
ldp $t0,$t1,[$inp],#16 // load input
sub $len,$len,#16
#ifdef __AARCH64EB__
rev $t0,$t0
rev $t1,$t1
#endif
adds $h0,$h0,$t0 // accumulate input
adcs $h1,$h1,$t1
mul $d0,$h0,$r0 // h0*r0
adc $h2,$h2,$padbit
umulh $d1,$h0,$r0
mul $t0,$h1,$s1 // h1*5*r1
umulh $t1,$h1,$s1
adds $d0,$d0,$t0
mul $t0,$h0,$r1 // h0*r1
adc $d1,$d1,$t1
umulh $d2,$h0,$r1
adds $d1,$d1,$t0
mul $t0,$h1,$r0 // h1*r0
adc $d2,$d2,xzr
umulh $t1,$h1,$r0
adds $d1,$d1,$t0
mul $t0,$h2,$s1 // h2*5*r1
adc $d2,$d2,$t1
mul $t1,$h2,$r0 // h2*r0
adds $d1,$d1,$t0
adc $d2,$d2,$t1
and $t0,$d2,#-4 // final reduction
and $h2,$d2,#3
add $t0,$t0,$d2,lsr#2
adds $h0,$d0,$t0
adcs $h1,$d1,xzr
adc $h2,$h2,xzr
cbnz $len,.Loop
stp $h0,$h1,[$ctx] // store hash value
stp $h2,xzr,[$ctx,#16] // [and clear is_base2_26]
.Lno_data:
ret
.size poly1305_blocks,.-poly1305_blocks
.type poly1305_emit,%function
.align 5
poly1305_emit:
.Lpoly1305_emit:
ldp $h0,$h1,[$ctx] // load hash base 2^64
ldp $h2,$r0,[$ctx,#16] // [along with is_base2_26]
ldp $t0,$t1,[$nonce] // load nonce
#ifdef __AARCH64EB__
lsr $d0,$h0,#32
mov w#$d1,w#$h0
lsr $d2,$h1,#32
mov w15,w#$h1
lsr x16,$h2,#32
#else
mov w#$d0,w#$h0
lsr $d1,$h0,#32
mov w#$d2,w#$h1
lsr x15,$h1,#32
mov w16,w#$h2
#endif
add $d0,$d0,$d1,lsl#26 // base 2^26 -> base 2^64
lsr $d1,$d2,#12
adds $d0,$d0,$d2,lsl#52
add $d1,$d1,x15,lsl#14
adc $d1,$d1,xzr
lsr $d2,x16,#24
adds $d1,$d1,x16,lsl#40
adc $d2,$d2,xzr
cmp $r0,#0 // is_base2_26?
csel $h0,$h0,$d0,eq // choose between radixes
csel $h1,$h1,$d1,eq
csel $h2,$h2,$d2,eq
adds $d0,$h0,#5 // compare to modulus
adcs $d1,$h1,xzr
adc $d2,$h2,xzr
tst $d2,#-4 // see if it's carried/borrowed
csel $h0,$h0,$d0,eq
csel $h1,$h1,$d1,eq
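// Note (added): h is compared against p = 2^130-5 by computing h+5; if
// that sum reaches bit 130 (detected by the tst above), then h >= p and
// the reduced value is selected, otherwise h is already canonical.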
#ifdef __AARCH64EB__
ror $t0,$t0,#32 // flip nonce words
ror $t1,$t1,#32
#endif
adds $h0,$h0,$t0 // accumulate nonce
adc $h1,$h1,$t1
#ifdef __AARCH64EB__
rev $h0,$h0 // flip output bytes
rev $h1,$h1
#endif
stp $h0,$h1,[$mac] // write result
ret
.size poly1305_emit,.-poly1305_emit
___
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
my ($T0,$T1,$MASK) = map("v$_",(29..31));
my ($in2,$zeros)=("x16","x17");
my $is_base2_26 = $zeros; # borrow
$code.=<<___;
.type poly1305_mult,%function
.align 5
poly1305_mult:
mul $d0,$h0,$r0 // h0*r0
umulh $d1,$h0,$r0
mul $t0,$h1,$s1 // h1*5*r1
umulh $t1,$h1,$s1
adds $d0,$d0,$t0
mul $t0,$h0,$r1 // h0*r1
adc $d1,$d1,$t1
umulh $d2,$h0,$r1
adds $d1,$d1,$t0
mul $t0,$h1,$r0 // h1*r0
adc $d2,$d2,xzr
umulh $t1,$h1,$r0
adds $d1,$d1,$t0
mul $t0,$h2,$s1 // h2*5*r1
adc $d2,$d2,$t1
mul $t1,$h2,$r0 // h2*r0
adds $d1,$d1,$t0
adc $d2,$d2,$t1
and $t0,$d2,#-4 // final reduction
and $h2,$d2,#3
add $t0,$t0,$d2,lsr#2
adds $h0,$d0,$t0
adcs $h1,$d1,xzr
adc $h2,$h2,xzr
ret
.size poly1305_mult,.-poly1305_mult
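// Note (added): poly1305_mult is the same one-block multiply/reduce as
// the body of .Loop in poly1305_blocks; the NEON setup code below calls
// it together with poly1305_splat to derive and store r^2, r^3 and r^4.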
.type poly1305_splat,%function
.align 4
poly1305_splat:
and x12,$h0,#0x03ffffff // base 2^64 -> base 2^26
ubfx x13,$h0,#26,#26
extr x14,$h1,$h0,#52
and x14,x14,#0x03ffffff
ubfx x15,$h1,#14,#26
extr x16,$h2,$h1,#40
str w12,[$ctx,#16*0] // r0
add w12,w13,w13,lsl#2 // r1*5
str w13,[$ctx,#16*1] // r1
add w13,w14,w14,lsl#2 // r2*5
str w12,[$ctx,#16*2] // s1
str w14,[$ctx,#16*3] // r2
add w14,w15,w15,lsl#2 // r3*5
str w13,[$ctx,#16*4] // s2
str w15,[$ctx,#16*5] // r3
add w15,w16,w16,lsl#2 // r4*5
str w14,[$ctx,#16*6] // s3
str w16,[$ctx,#16*7] // r4
str w15,[$ctx,#16*8] // s4
ret
.size poly1305_splat,.-poly1305_splat
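// Note (added): poly1305_splat stores one power of r as nine 32-bit words
// (r0..r4 interleaved with 5*r1..5*r4) at a 16-byte stride.  .Linit_neon
// calls it four times, stepping $ctx back by 4 bytes between calls, so
// r^1..r^4 land in lanes 3..0 of each .4s table vector, i.e. each vector
// reads [r^4 | r^3 | r^2 | r^1] limb-wise.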
#ifdef __KERNEL__
.globl poly1305_blocks_neon
#endif
.type poly1305_blocks_neon,%function
.align 5
poly1305_blocks_neon:
.Lpoly1305_blocks_neon:
ldr $is_base2_26,[$ctx,#24]
cmp $len,#128
b.lo .Lpoly1305_blocks
.inst 0xd503233f // paciasp
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp d8,d9,[sp,#16] // meet ABI requirements
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
cbz $is_base2_26,.Lbase2_64_neon
ldp w10,w11,[$ctx] // load hash value base 2^26
ldp w12,w13,[$ctx,#8]
ldr w14,[$ctx,#16]
tst $len,#31
b.eq .Leven_neon
ldp $r0,$r1,[$ctx,#32] // load key value
add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64
lsr $h1,x12,#12
adds $h0,$h0,x12,lsl#52
add $h1,$h1,x13,lsl#14
adc $h1,$h1,xzr
lsr $h2,x14,#24
adds $h1,$h1,x14,lsl#40
adc $d2,$h2,xzr // can be partially reduced...
ldp $d0,$d1,[$inp],#16 // load input
sub $len,$len,#16
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
and $t0,$d2,#-4 // ... so reduce
and $h2,$d2,#3
add $t0,$t0,$d2,lsr#2
adds $h0,$h0,$t0
adcs $h1,$h1,xzr
adc $h2,$h2,xzr
#ifdef __AARCH64EB__
rev $d0,$d0
rev $d1,$d1
#endif
adds $h0,$h0,$d0 // accumulate input
adcs $h1,$h1,$d1
adc $h2,$h2,$padbit
bl poly1305_mult
and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
ubfx x11,$h0,#26,#26
extr x12,$h1,$h0,#52
and x12,x12,#0x03ffffff
ubfx x13,$h1,#14,#26
extr x14,$h2,$h1,#40
b .Leven_neon
.align 4
.Lbase2_64_neon:
ldp $r0,$r1,[$ctx,#32] // load key value
ldp $h0,$h1,[$ctx] // load hash value base 2^64
ldr $h2,[$ctx,#16]
tst $len,#31
b.eq .Linit_neon
ldp $d0,$d1,[$inp],#16 // load input
sub $len,$len,#16
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
#ifdef __AARCH64EB__
rev $d0,$d0
rev $d1,$d1
#endif
adds $h0,$h0,$d0 // accumulate input
adcs $h1,$h1,$d1
adc $h2,$h2,$padbit
bl poly1305_mult
.Linit_neon:
ldr w17,[$ctx,#48] // first table element
and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
ubfx x11,$h0,#26,#26
extr x12,$h1,$h0,#52
and x12,x12,#0x03ffffff
ubfx x13,$h1,#14,#26
extr x14,$h2,$h1,#40
cmp w17,#-1 // is value impossible?
b.ne .Leven_neon
fmov ${H0},x10
fmov ${H1},x11
fmov ${H2},x12
fmov ${H3},x13
fmov ${H4},x14
////////////////////////////////// initialize r^n table
mov $h0,$r0 // r^1
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
mov $h1,$r1
mov $h2,xzr
add $ctx,$ctx,#48+12
bl poly1305_splat
bl poly1305_mult // r^2
sub $ctx,$ctx,#4
bl poly1305_splat
bl poly1305_mult // r^3
sub $ctx,$ctx,#4
bl poly1305_splat
bl poly1305_mult // r^4
sub $ctx,$ctx,#4
bl poly1305_splat
sub $ctx,$ctx,#48 // restore original $ctx
b .Ldo_neon
.align 4
.Leven_neon:
fmov ${H0},x10
fmov ${H1},x11
fmov ${H2},x12
fmov ${H3},x13
fmov ${H4},x14
.Ldo_neon:
ldp x8,x12,[$inp,#32] // inp[2:3]
subs $len,$len,#64
ldp x9,x13,[$inp,#48]
add $in2,$inp,#96
adr $zeros,.Lzeros
lsl $padbit,$padbit,#24
add x15,$ctx,#48
#ifdef __AARCH64EB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
and x5,x9,#0x03ffffff
ubfx x6,x8,#26,#26
ubfx x7,x9,#26,#26
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
extr x8,x12,x8,#52
extr x9,x13,x9,#52
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
fmov $IN23_0,x4
and x8,x8,#0x03ffffff
and x9,x9,#0x03ffffff
ubfx x10,x12,#14,#26
ubfx x11,x13,#14,#26
add x12,$padbit,x12,lsr#40
add x13,$padbit,x13,lsr#40
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
fmov $IN23_1,x6
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
fmov $IN23_2,x8
fmov $IN23_3,x10
fmov $IN23_4,x12
ldp x8,x12,[$inp],#16 // inp[0:1]
ldp x9,x13,[$inp],#48
ld1 {$R0,$R1,$S1,$R2},[x15],#64
ld1 {$S2,$R3,$S3,$R4},[x15],#64
ld1 {$S4},[x15]
#ifdef __AARCH64EB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
and x5,x9,#0x03ffffff
ubfx x6,x8,#26,#26
ubfx x7,x9,#26,#26
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
extr x8,x12,x8,#52
extr x9,x13,x9,#52
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
fmov $IN01_0,x4
and x8,x8,#0x03ffffff
and x9,x9,#0x03ffffff
ubfx x10,x12,#14,#26
ubfx x11,x13,#14,#26
add x12,$padbit,x12,lsr#40
add x13,$padbit,x13,lsr#40
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
fmov $IN01_1,x6
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
movi $MASK.2d,#-1
fmov $IN01_2,x8
fmov $IN01_3,x10
fmov $IN01_4,x12
ushr $MASK.2d,$MASK.2d,#38
b.ls .Lskip_loop
.align 4
.Loop_neon:
////////////////////////////////////////////////////////////////
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
// \___________________/
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
// \___________________/ \____________________/
//
// Note that we start with inp[2:3]*r^2. This is because it
// doesn't depend on reduction in previous iteration.
////////////////////////////////////////////////////////////////
// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
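// (added) accordingly, the inp[2:3] terms below use vector lane [2],
// i.e. r^2, and the hash+inp[0:1] terms use lane [0], i.e. r^4; with all
// limbs kept (lazily) near 26 bits, the ten umlal products per limb fit
// comfortably in the 64-bit accumulators.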
subs $len,$len,#64
umull $ACC4,$IN23_0,${R4}[2]
csel $in2,$zeros,$in2,lo
umull $ACC3,$IN23_0,${R3}[2]
umull $ACC2,$IN23_0,${R2}[2]
ldp x8,x12,[$in2],#16 // inp[2:3] (or zero)
umull $ACC1,$IN23_0,${R1}[2]
ldp x9,x13,[$in2],#48
umull $ACC0,$IN23_0,${R0}[2]
#ifdef __AARCH64EB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
umlal $ACC4,$IN23_1,${R3}[2]
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
umlal $ACC3,$IN23_1,${R2}[2]
and x5,x9,#0x03ffffff
umlal $ACC2,$IN23_1,${R1}[2]
ubfx x6,x8,#26,#26
umlal $ACC1,$IN23_1,${R0}[2]
ubfx x7,x9,#26,#26
umlal $ACC0,$IN23_1,${S4}[2]
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
umlal $ACC4,$IN23_2,${R2}[2]
extr x8,x12,x8,#52
umlal $ACC3,$IN23_2,${R1}[2]
extr x9,x13,x9,#52
umlal $ACC2,$IN23_2,${R0}[2]
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
umlal $ACC1,$IN23_2,${S4}[2]
fmov $IN23_0,x4
umlal $ACC0,$IN23_2,${S3}[2]
and x8,x8,#0x03ffffff
umlal $ACC4,$IN23_3,${R1}[2]
and x9,x9,#0x03ffffff
umlal $ACC3,$IN23_3,${R0}[2]
ubfx x10,x12,#14,#26
umlal $ACC2,$IN23_3,${S4}[2]
ubfx x11,x13,#14,#26
umlal $ACC1,$IN23_3,${S3}[2]
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
umlal $ACC0,$IN23_3,${S2}[2]
fmov $IN23_1,x6
add $IN01_2,$IN01_2,$H2
add x12,$padbit,x12,lsr#40
umlal $ACC4,$IN23_4,${R0}[2]
add x13,$padbit,x13,lsr#40
umlal $ACC3,$IN23_4,${S4}[2]
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
umlal $ACC2,$IN23_4,${S3}[2]
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
umlal $ACC1,$IN23_4,${S2}[2]
fmov $IN23_2,x8
umlal $ACC0,$IN23_4,${S1}[2]
fmov $IN23_3,x10
////////////////////////////////////////////////////////////////
// (hash+inp[0:1])*r^4 and accumulate
add $IN01_0,$IN01_0,$H0
fmov $IN23_4,x12
umlal $ACC3,$IN01_2,${R1}[0]
ldp x8,x12,[$inp],#16 // inp[0:1]
umlal $ACC0,$IN01_2,${S3}[0]
ldp x9,x13,[$inp],#48
umlal $ACC4,$IN01_2,${R2}[0]
umlal $ACC1,$IN01_2,${S4}[0]
umlal $ACC2,$IN01_2,${R0}[0]
#ifdef __AARCH64EB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
add $IN01_1,$IN01_1,$H1
umlal $ACC3,$IN01_0,${R3}[0]
umlal $ACC4,$IN01_0,${R4}[0]
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
umlal $ACC2,$IN01_0,${R2}[0]
and x5,x9,#0x03ffffff
umlal $ACC0,$IN01_0,${R0}[0]
ubfx x6,x8,#26,#26
umlal $ACC1,$IN01_0,${R1}[0]
ubfx x7,x9,#26,#26
add $IN01_3,$IN01_3,$H3
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
umlal $ACC3,$IN01_1,${R2}[0]
extr x8,x12,x8,#52
umlal $ACC4,$IN01_1,${R3}[0]
extr x9,x13,x9,#52
umlal $ACC0,$IN01_1,${S4}[0]
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
umlal $ACC2,$IN01_1,${R1}[0]
fmov $IN01_0,x4
umlal $ACC1,$IN01_1,${R0}[0]
and x8,x8,#0x03ffffff
add $IN01_4,$IN01_4,$H4
and x9,x9,#0x03ffffff
umlal $ACC3,$IN01_3,${R0}[0]
ubfx x10,x12,#14,#26
umlal $ACC0,$IN01_3,${S2}[0]
ubfx x11,x13,#14,#26
umlal $ACC4,$IN01_3,${R1}[0]
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
umlal $ACC1,$IN01_3,${S3}[0]
fmov $IN01_1,x6
umlal $ACC2,$IN01_3,${S4}[0]
add x12,$padbit,x12,lsr#40
umlal $ACC3,$IN01_4,${S4}[0]
add x13,$padbit,x13,lsr#40
umlal $ACC0,$IN01_4,${S1}[0]
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
umlal $ACC4,$IN01_4,${R0}[0]
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
umlal $ACC1,$IN01_4,${S2}[0]
fmov $IN01_2,x8
umlal $ACC2,$IN01_4,${S3}[0]
fmov $IN01_3,x10
fmov $IN01_4,x12
////////////////////////////////////////////////////////////////
// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
// and P. Schwabe
//
// [see discussion in poly1305-armv4 module]
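// (added) carries propagate h3->h4, h0->h1, h4->h0, h1->h2, h2->h3,
// h0->h1, h3->h4; the h4->h0 step adds the carry once and once more
// shifted left by 2, i.e. multiplies it by 5 (2^130 == 5 mod p), and
// each narrowed limb is masked back to 26 bits (the bic of #0xfc,lsl#24).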
ushr $T0.2d,$ACC3,#26
xtn $H3,$ACC3
ushr $T1.2d,$ACC0,#26
and $ACC0,$ACC0,$MASK.2d
add $ACC4,$ACC4,$T0.2d // h3 -> h4
bic $H3,#0xfc,lsl#24 // &=0x03ffffff
add $ACC1,$ACC1,$T1.2d // h0 -> h1
ushr $T0.2d,$ACC4,#26
xtn $H4,$ACC4
ushr $T1.2d,$ACC1,#26
xtn $H1,$ACC1
bic $H4,#0xfc,lsl#24
add $ACC2,$ACC2,$T1.2d // h1 -> h2
add $ACC0,$ACC0,$T0.2d
shl $T0.2d,$T0.2d,#2
shrn $T1.2s,$ACC2,#26
xtn $H2,$ACC2
add $ACC0,$ACC0,$T0.2d // h4 -> h0
bic $H1,#0xfc,lsl#24
add $H3,$H3,$T1.2s // h2 -> h3
bic $H2,#0xfc,lsl#24
shrn $T0.2s,$ACC0,#26
xtn $H0,$ACC0
ushr $T1.2s,$H3,#26
bic $H3,#0xfc,lsl#24
bic $H0,#0xfc,lsl#24
add $H1,$H1,$T0.2s // h0 -> h1
add $H4,$H4,$T1.2s // h3 -> h4
b.hi .Loop_neon
.Lskip_loop:
dup $IN23_2,${IN23_2}[0]
add $IN01_2,$IN01_2,$H2
////////////////////////////////////////////////////////////////
// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
adds $len,$len,#32
b.ne .Long_tail
dup $IN23_2,${IN01_2}[0]
add $IN23_0,$IN01_0,$H0
add $IN23_3,$IN01_3,$H3
add $IN23_1,$IN01_1,$H1
add $IN23_4,$IN01_4,$H4
.Long_tail:
dup $IN23_0,${IN23_0}[0]
umull2 $ACC0,$IN23_2,${S3}
umull2 $ACC3,$IN23_2,${R1}
umull2 $ACC4,$IN23_2,${R2}
umull2 $ACC2,$IN23_2,${R0}
umull2 $ACC1,$IN23_2,${S4}
dup $IN23_1,${IN23_1}[0]
umlal2 $ACC0,$IN23_0,${R0}
umlal2 $ACC2,$IN23_0,${R2}
umlal2 $ACC3,$IN23_0,${R3}
umlal2 $ACC4,$IN23_0,${R4}
umlal2 $ACC1,$IN23_0,${R1}
dup $IN23_3,${IN23_3}[0]
umlal2 $ACC0,$IN23_1,${S4}
umlal2 $ACC3,$IN23_1,${R2}
umlal2 $ACC2,$IN23_1,${R1}
umlal2 $ACC4,$IN23_1,${R3}
umlal2 $ACC1,$IN23_1,${R0}
dup $IN23_4,${IN23_4}[0]
umlal2 $ACC3,$IN23_3,${R0}
umlal2 $ACC4,$IN23_3,${R1}
umlal2 $ACC0,$IN23_3,${S2}
umlal2 $ACC1,$IN23_3,${S3}
umlal2 $ACC2,$IN23_3,${S4}
umlal2 $ACC3,$IN23_4,${S4}
umlal2 $ACC0,$IN23_4,${S1}
umlal2 $ACC4,$IN23_4,${R0}
umlal2 $ACC1,$IN23_4,${S2}
umlal2 $ACC2,$IN23_4,${S3}
b.eq .Lshort_tail
////////////////////////////////////////////////////////////////
// (hash+inp[0:1])*r^4:r^3 and accumulate
add $IN01_0,$IN01_0,$H0
umlal $ACC3,$IN01_2,${R1}
umlal $ACC0,$IN01_2,${S3}
umlal $ACC4,$IN01_2,${R2}
umlal $ACC1,$IN01_2,${S4}
umlal $ACC2,$IN01_2,${R0}
add $IN01_1,$IN01_1,$H1
umlal $ACC3,$IN01_0,${R3}
umlal $ACC0,$IN01_0,${R0}
umlal $ACC4,$IN01_0,${R4}
umlal $ACC1,$IN01_0,${R1}
umlal $ACC2,$IN01_0,${R2}
add $IN01_3,$IN01_3,$H3
umlal $ACC3,$IN01_1,${R2}
umlal $ACC0,$IN01_1,${S4}
umlal $ACC4,$IN01_1,${R3}
umlal $ACC1,$IN01_1,${R0}
umlal $ACC2,$IN01_1,${R1}
add $IN01_4,$IN01_4,$H4
umlal $ACC3,$IN01_3,${R0}
umlal $ACC0,$IN01_3,${S2}
umlal $ACC4,$IN01_3,${R1}
umlal $ACC1,$IN01_3,${S3}
umlal $ACC2,$IN01_3,${S4}
umlal $ACC3,$IN01_4,${S4}
umlal $ACC0,$IN01_4,${S1}
umlal $ACC4,$IN01_4,${R0}
umlal $ACC1,$IN01_4,${S2}
umlal $ACC2,$IN01_4,${S3}
.Lshort_tail:
////////////////////////////////////////////////////////////////
// horizontal add
addp $ACC3,$ACC3,$ACC3
ldp d8,d9,[sp,#16] // meet ABI requirements
addp $ACC0,$ACC0,$ACC0
ldp d10,d11,[sp,#32]
addp $ACC4,$ACC4,$ACC4
ldp d12,d13,[sp,#48]
addp $ACC1,$ACC1,$ACC1
ldp d14,d15,[sp,#64]
addp $ACC2,$ACC2,$ACC2
ldr x30,[sp,#8]
////////////////////////////////////////////////////////////////
// lazy reduction, but without narrowing
ushr $T0.2d,$ACC3,#26
and $ACC3,$ACC3,$MASK.2d
ushr $T1.2d,$ACC0,#26
and $ACC0,$ACC0,$MASK.2d
add $ACC4,$ACC4,$T0.2d // h3 -> h4
add $ACC1,$ACC1,$T1.2d // h0 -> h1
ushr $T0.2d,$ACC4,#26
and $ACC4,$ACC4,$MASK.2d
ushr $T1.2d,$ACC1,#26
and $ACC1,$ACC1,$MASK.2d
add $ACC2,$ACC2,$T1.2d // h1 -> h2
add $ACC0,$ACC0,$T0.2d
shl $T0.2d,$T0.2d,#2
ushr $T1.2d,$ACC2,#26
and $ACC2,$ACC2,$MASK.2d
add $ACC0,$ACC0,$T0.2d // h4 -> h0
add $ACC3,$ACC3,$T1.2d // h2 -> h3
ushr $T0.2d,$ACC0,#26
and $ACC0,$ACC0,$MASK.2d
ushr $T1.2d,$ACC3,#26
and $ACC3,$ACC3,$MASK.2d
add $ACC1,$ACC1,$T0.2d // h0 -> h1
add $ACC4,$ACC4,$T1.2d // h3 -> h4
////////////////////////////////////////////////////////////////
// write the result, can be partially reduced
st4 {$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
mov x4,#1
st1 {$ACC4}[0],[$ctx]
str x4,[$ctx,#8] // set is_base2_26
ldr x29,[sp],#80
.inst 0xd50323bf // autiasp
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon
.align 5
.Lzeros:
.long 0,0,0,0,0,0,0,0
.asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm"
.align 2
#if !defined(__KERNEL__) && !defined(_WIN64)
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P
#endif
___
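# The loop below post-processes the generated code (summary added for
# readability; the substitutions themselves are unchanged): it adjusts
# vector arrangement suffixes to what AArch64 actually requires for the
# mixed 32/64-bit NEON code above - GPR-to-SIMD fmov becomes "fmov dN,xM",
# umull/umlal take .2s sources while umull2/umlal2 take .4s, shrn narrows
# to .2s, dup operands become .2d and eor/and operands .16b, lane stores
# use .s - then drops the element count from "[lane]" selectors and
# rewrites the internal "w#xN" markers to plain "wN".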
foreach (split("\n",$code)) {
    s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/ or
    s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/ or
    (m/\bdup\b/ and (s/\.[24]s/.2d/g or 1)) or
    (m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1)) or
    (m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1)) or
    (m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1)) or
    (m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));
    s/\.[124]([sd])\[/.$1\[/;
    s/w#x([0-9]+)/w$1/g;
    print $_,"\n";
}
close STDOUT;