/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NH - ε-almost-universal hash function, x86_64 AVX2 accelerated
 *
 * Copyright 2018 Google LLC
 *
 * Author: Eric Biggers <[email protected]>
 */

#include <linux/linkage.h>
  10. #define PASS0_SUMS %ymm0
  11. #define PASS1_SUMS %ymm1
  12. #define PASS2_SUMS %ymm2
  13. #define PASS3_SUMS %ymm3
  14. #define K0 %ymm4
  15. #define K0_XMM %xmm4
  16. #define K1 %ymm5
  17. #define K1_XMM %xmm5
  18. #define K2 %ymm6
  19. #define K2_XMM %xmm6
  20. #define K3 %ymm7
  21. #define K3_XMM %xmm7
  22. #define T0 %ymm8
  23. #define T1 %ymm9
  24. #define T2 %ymm10
  25. #define T2_XMM %xmm10
  26. #define T3 %ymm11
  27. #define T3_XMM %xmm11
  28. #define T4 %ymm12
  29. #define T5 %ymm13
  30. #define T6 %ymm14
  31. #define T7 %ymm15
  32. #define KEY %rdi
  33. #define MESSAGE %rsi
  34. #define MESSAGE_LEN %rdx
  35. #define HASH %rcx
  36. .macro _nh_2xstride k0, k1, k2, k3
  37. // Add message words to key words
  38. vpaddd \k0, T3, T0
  39. vpaddd \k1, T3, T1
  40. vpaddd \k2, T3, T2
  41. vpaddd \k3, T3, T3
  42. // Multiply 32x32 => 64 and accumulate
  43. vpshufd $0x10, T0, T4
  44. vpshufd $0x32, T0, T0
  45. vpshufd $0x10, T1, T5
  46. vpshufd $0x32, T1, T1
  47. vpshufd $0x10, T2, T6
  48. vpshufd $0x32, T2, T2
  49. vpshufd $0x10, T3, T7
  50. vpshufd $0x32, T3, T3
  51. vpmuludq T4, T0, T0
  52. vpmuludq T5, T1, T1
  53. vpmuludq T6, T2, T2
  54. vpmuludq T7, T3, T3
  55. vpaddq T0, PASS0_SUMS, PASS0_SUMS
  56. vpaddq T1, PASS1_SUMS, PASS1_SUMS
  57. vpaddq T2, PASS2_SUMS, PASS2_SUMS
  58. vpaddq T3, PASS3_SUMS, PASS3_SUMS
  59. .endm
  60. /*
  61. * void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
  62. * u8 hash[NH_HASH_BYTES])
  63. *
  64. * It's guaranteed that message_len % 16 == 0.
  65. */
  66. SYM_FUNC_START(nh_avx2)
  67. vmovdqu 0x00(KEY), K0
  68. vmovdqu 0x10(KEY), K1
  69. add $0x20, KEY
  70. vpxor PASS0_SUMS, PASS0_SUMS, PASS0_SUMS
  71. vpxor PASS1_SUMS, PASS1_SUMS, PASS1_SUMS
  72. vpxor PASS2_SUMS, PASS2_SUMS, PASS2_SUMS
  73. vpxor PASS3_SUMS, PASS3_SUMS, PASS3_SUMS
  74. sub $0x40, MESSAGE_LEN
  75. jl .Lloop4_done
  76. .Lloop4:
  77. vmovdqu (MESSAGE), T3
  78. vmovdqu 0x00(KEY), K2
  79. vmovdqu 0x10(KEY), K3
  80. _nh_2xstride K0, K1, K2, K3
  81. vmovdqu 0x20(MESSAGE), T3
  82. vmovdqu 0x20(KEY), K0
  83. vmovdqu 0x30(KEY), K1
  84. _nh_2xstride K2, K3, K0, K1
  85. add $0x40, MESSAGE
  86. add $0x40, KEY
  87. sub $0x40, MESSAGE_LEN
  88. jge .Lloop4
  89. .Lloop4_done:
  90. and $0x3f, MESSAGE_LEN
  91. jz .Ldone
  92. cmp $0x20, MESSAGE_LEN
  93. jl .Llast
  94. // 2 or 3 strides remain; do 2 more.
  95. vmovdqu (MESSAGE), T3
  96. vmovdqu 0x00(KEY), K2
  97. vmovdqu 0x10(KEY), K3
  98. _nh_2xstride K0, K1, K2, K3
  99. add $0x20, MESSAGE
  100. add $0x20, KEY
  101. sub $0x20, MESSAGE_LEN
  102. jz .Ldone
  103. vmovdqa K2, K0
  104. vmovdqa K3, K1
  105. .Llast:
  106. // Last stride. Zero the high 128 bits of the message and keys so they
  107. // don't affect the result when processing them like 2 strides.
  108. vmovdqu (MESSAGE), T3_XMM
  109. vmovdqa K0_XMM, K0_XMM
  110. vmovdqa K1_XMM, K1_XMM
  111. vmovdqu 0x00(KEY), K2_XMM
  112. vmovdqu 0x10(KEY), K3_XMM
  113. _nh_2xstride K0, K1, K2, K3
  114. .Ldone:
  115. // Sum the accumulators for each pass, then store the sums to 'hash'
  116. // PASS0_SUMS is (0A 0B 0C 0D)
  117. // PASS1_SUMS is (1A 1B 1C 1D)
  118. // PASS2_SUMS is (2A 2B 2C 2D)
  119. // PASS3_SUMS is (3A 3B 3C 3D)
  120. // We need the horizontal sums:
  121. // (0A + 0B + 0C + 0D,
  122. // 1A + 1B + 1C + 1D,
  123. // 2A + 2B + 2C + 2D,
  124. // 3A + 3B + 3C + 3D)
  125. //
  126. vpunpcklqdq PASS1_SUMS, PASS0_SUMS, T0 // T0 = (0A 1A 0C 1C)
  127. vpunpckhqdq PASS1_SUMS, PASS0_SUMS, T1 // T1 = (0B 1B 0D 1D)
  128. vpunpcklqdq PASS3_SUMS, PASS2_SUMS, T2 // T2 = (2A 3A 2C 3C)
  129. vpunpckhqdq PASS3_SUMS, PASS2_SUMS, T3 // T3 = (2B 3B 2D 3D)
  130. vinserti128 $0x1, T2_XMM, T0, T4 // T4 = (0A 1A 2A 3A)
  131. vinserti128 $0x1, T3_XMM, T1, T5 // T5 = (0B 1B 2B 3B)
  132. vperm2i128 $0x31, T2, T0, T0 // T0 = (0C 1C 2C 3C)
  133. vperm2i128 $0x31, T3, T1, T1 // T1 = (0D 1D 2D 3D)
  134. vpaddq T5, T4, T4
  135. vpaddq T1, T0, T0
  136. vpaddq T4, T0, T0
  137. vmovdqu T0, (HASH)
  138. RET
  139. SYM_FUNC_END(nh_avx2)