avx2.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 2012 Intel Corporation
 *   Author: Yuanhan Liu <[email protected]>
 *
 *   Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * AVX2 implementation of RAID-6 syndrome functions
 *
 */

#include <linux/raid/pq.h>
#include "x86.h"
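
/*
 * Background: for data disks D_0 .. D_z0, the two syndromes are
 *
 *	P = D_0 + D_1 + ... + D_z0
 *	Q = g^0*D_0 + g^1*D_1 + ... + g^z0*D_z0
 *
 * where "+" is bytewise XOR and multiplication is carried out in
 * GF(2^8) with generator g = 2 (see H. Peter Anvin, "The mathematics
 * of RAID-6").  The gen_syndrome loops below evaluate Q by Horner's
 * rule, walking from the highest data disk down:
 *
 *	Q = (...((D_z0 * g + D_{z0-1}) * g + D_{z0-2}) * g + ...) + D_0
 */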

static const struct raid6_avx2_constants {
	u64 x1d[4];
} raid6_avx2_constants __aligned(32) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
	  0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
};
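
/*
 * 0x1d is the low byte of the GF(2^8) reduction polynomial 0x11d
 * (x^8 + x^4 + x^3 + x^2 + 1), replicated across all 32 byte lanes of
 * a ymm register.  Each vpcmpgtb/vpaddb/vpand/vpxor group below is a
 * 32-lane multiply-by-2 in that field: vpcmpgtb against a zero
 * register yields 0xff in every lane whose top bit is set, vpaddb
 * doubles each lane, and the masked XOR folds the reduction back in.
 * A scalar sketch of one lane (illustrative only, not part of the
 * kernel sources):
 *
 *	static inline u8 gf_mul2(u8 v)
 *	{
 *		u8 mask = (v & 0x80) ? 0xff : 0x00;	// vpcmpgtb vs zero
 *		return (u8)(v + v) ^ (mask & 0x1d);	// vpaddb, vpand/vpxor
 *	}
 *
 * For example gf_mul2(0x80) = 0x00 ^ 0x1d = 0x1d.
 */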

static int raid6_have_avx2(void)
{
	return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX);
}
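
/*
 * All three implementations share the same I/O pattern: SIMD use in
 * the kernel must sit between kernel_fpu_begin()/kernel_fpu_end(),
 * inputs are fetched with prefetchnta to limit cache pollution, and
 * the computed P/Q blocks are written with non-temporal vmovntdq
 * stores, since the parity output is not expected to be read back
 * soon.  Non-temporal stores are weakly ordered, so each routine ends
 * with an sfence before giving up the FPU.
 */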

/*
 * Plain AVX2 implementation
 */
static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
	asm volatile("vpxor %ymm3,%ymm3,%ymm3");	/* Zero temp */

	for (d = 0; d < bytes; d += 32) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("vmovdqa %ymm2,%ymm4");/* Q[0] */
		asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));
		for (z = z0-2; z >= 0; z--) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm6,%ymm2,%ymm2");
			asm volatile("vpxor %ymm6,%ymm4,%ymm4");
			asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));
		}
		asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
		asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
		asm volatile("vpand %ymm0,%ymm5,%ymm5");
		asm volatile("vpxor %ymm5,%ymm4,%ymm4");
		asm volatile("vpxor %ymm6,%ymm2,%ymm2");
		asm volatile("vpxor %ymm6,%ymm4,%ymm4");

		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vpxor %ymm2,%ymm2,%ymm2");
		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vpxor %ymm4,%ymm4,%ymm4");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
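
/*
 * ->xor_syndrome() folds the GF-weighted contribution of data disks
 * start..stop into existing parity: p[] is XORed with those disks, and
 * q[] with sum(g^z * D_z) for z in [start, stop].  The "right side"
 * loop accumulates the selected disks by Horner's rule; the "left
 * side" loop then only multiplies the accumulator by g once per
 * skipped low-numbered disk, so data blocks below 'start' never have
 * to be read at all.
 */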
static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 32) {
		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
		}
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
		}
		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_avx2x1 = {
	raid6_avx21_gen_syndrome,
	raid6_avx21_xor_syndrome,
	raid6_have_avx2,
	"avx2x1",
	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
};
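
/*
 * Calling convention, as exercised by the benchmarking and selection
 * code in lib/raid6/algos.c: ptrs[0..disks-3] point to the data
 * blocks, ptrs[disks-2] to P and ptrs[disks-1] to Q, and the vmovdqa
 * loads require all of them to be 32-byte aligned.  A minimal sketch
 * (NDISKS and the buffer setup are hypothetical, not from this file):
 *
 *	void *ptrs[NDISKS];	// NDISKS-2 data pages, then P, then Q
 *	// ... fill ptrs[] with page-aligned buffers ...
 *	if (raid6_avx2x1.valid())
 *		raid6_avx2x1.gen_syndrome(NDISKS, PAGE_SIZE, ptrs);
 */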

/*
 * Unrolled-by-2 AVX2 implementation
 */
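/*
 * Two independent accumulator pairs (ymm2/ymm4 for bytes 0-31,
 * ymm3/ymm6 for bytes 32-63) handle 64 bytes per outer iteration,
 * giving the out-of-order core two dependency chains to overlap.
 */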
static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
	asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */

	/* We uniformly assume a single prefetch covers at least 32 bytes */
	for (d = 0; d < bytes; d += 64) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
		asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */
		asm volatile("vmovdqa %ymm2,%ymm4"); /* Q[0] */
		asm volatile("vmovdqa %ymm3,%ymm6"); /* Q[1] */
		for (z = z0-1; z >= 0; z--) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
			asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
			asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
		}
		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 64) {
		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
		asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
		asm volatile("vpxor %ymm6,%ymm3,%ymm3");
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
			asm volatile("vmovdqa %0,%%ymm7"
				     :: "m" (dptr[z][d+32]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
		}
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
		}
		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
		asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_avx2x2 = {
	raid6_avx22_gen_syndrome,
	raid6_avx22_xor_syndrome,
	raid6_have_avx2,
	"avx2x2",
	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
};

#ifdef CONFIG_X86_64

/*
 * Unrolled-by-4 AVX2 implementation
 */
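/*
 * Guarded by CONFIG_X86_64 because this variant needs the upper ymm
 * registers (here ymm10-ymm15), which only exist in 64-bit mode.
 * Four P/Q accumulator pairs cover 128 bytes per pass, and the inner
 * loop re-walks every data disk per 128-byte slice, so P/Q stay
 * register-resident for the whole slice.
 */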
static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
	asm volatile("vpxor %ymm1,%ymm1,%ymm1");	/* Zero temp */
	asm volatile("vpxor %ymm2,%ymm2,%ymm2");	/* P[0] */
	asm volatile("vpxor %ymm3,%ymm3,%ymm3");	/* P[1] */
	asm volatile("vpxor %ymm4,%ymm4,%ymm4");	/* Q[0] */
	asm volatile("vpxor %ymm6,%ymm6,%ymm6");	/* Q[1] */
	asm volatile("vpxor %ymm10,%ymm10,%ymm10");	/* P[2] */
	asm volatile("vpxor %ymm11,%ymm11,%ymm11");	/* P[3] */
	asm volatile("vpxor %ymm12,%ymm12,%ymm12");	/* Q[2] */
	asm volatile("vpxor %ymm14,%ymm14,%ymm14");	/* Q[3] */

	for (d = 0; d < bytes; d += 128) {
		for (z = z0; z >= 0; z--) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64]));
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+96]));
			asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
			asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13");
			asm volatile("vpcmpgtb %ymm14,%ymm1,%ymm15");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpand %ymm0,%ymm13,%ymm13");
			asm volatile("vpand %ymm0,%ymm15,%ymm15");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
			asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
			asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
			asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64]));
			asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
			asm volatile("vpxor %ymm13,%ymm10,%ymm10");
			asm volatile("vpxor %ymm15,%ymm11,%ymm11");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
		}
		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vpxor %ymm2,%ymm2,%ymm2");
		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
		asm volatile("vpxor %ymm3,%ymm3,%ymm3");
		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
		asm volatile("vpxor %ymm10,%ymm10,%ymm10");
		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
		asm volatile("vpxor %ymm11,%ymm11,%ymm11");
		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vpxor %ymm4,%ymm4,%ymm4");
		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
		asm volatile("vpxor %ymm6,%ymm6,%ymm6");
		asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
		asm volatile("vpxor %ymm12,%ymm12,%ymm12");
		asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
		asm volatile("vpxor %ymm14,%ymm14,%ymm14");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 128) {
		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
		asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
		asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
		asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
		asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
		asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
		asm volatile("vpxor %ymm6,%ymm3,%ymm3");
		asm volatile("vpxor %ymm12,%ymm10,%ymm10");
		asm volatile("vpxor %ymm14,%ymm11,%ymm11");
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
			asm volatile("vpxor %ymm13,%ymm13,%ymm13");
			asm volatile("vpxor %ymm15,%ymm15,%ymm15");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
			asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
			asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpand %ymm0,%ymm13,%ymm13");
			asm volatile("vpand %ymm0,%ymm15,%ymm15");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
			asm volatile("vmovdqa %0,%%ymm7"
				     :: "m" (dptr[z][d+32]));
			asm volatile("vmovdqa %0,%%ymm13"
				     :: "m" (dptr[z][d+64]));
			asm volatile("vmovdqa %0,%%ymm15"
				     :: "m" (dptr[z][d+96]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
			asm volatile("vpxor %ymm13,%ymm10,%ymm10");
			asm volatile("vpxor %ymm15,%ymm11,%ymm11");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
		}
		asm volatile("prefetchnta %0" :: "m" (q[d]));
		asm volatile("prefetchnta %0" :: "m" (q[d+64]));
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
			asm volatile("vpxor %ymm13,%ymm13,%ymm13");
			asm volatile("vpxor %ymm15,%ymm15,%ymm15");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
			asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
			asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpand %ymm0,%ymm13,%ymm13");
			asm volatile("vpand %ymm0,%ymm15,%ymm15");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
		}
		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
		asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
		asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
		asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
		asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
		asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_avx2x4 = {
	raid6_avx24_gen_syndrome,
	raid6_avx24_xor_syndrome,
	raid6_have_avx2,
	"avx2x4",
	.priority = 2		/* Prefer AVX2 over priority 1 (SSE2 and others) */
};
#endif /* CONFIG_X86_64 */