recov_ssse3.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 */

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}
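
/*
 * Two-disk data recovery.  With data blocks D_a and D_b missing
 * (a = faila, b = failb), regenerating the syndromes over the surviving
 * blocks yields P_ab and Q_ab, and in GF(2^8) with generator g:
 *
 *	px  = P ^ P_ab = D_a ^ D_b
 *	qx  = Q ^ Q_ab = g^a*D_a ^ g^b*D_b
 *	D_b = pbmul(px) ^ qmul(qx),	pbmul(x) = x * (g^(b-a) + 1)^-1
 *					qmul(x)  = x * (g^a + g^b)^-1
 *	D_a = D_b ^ px
 *
 * Per byte this is roughly the scalar loop of lib/raid6/recov.c:
 *
 *	px = *p ^ *dp;
 *	qx = qmul[*q ^ *dq];
 *	*dq++ = db = pbmul[px] ^ qx;	(reconstructed D_b)
 *	*dp++ = db ^ px;		(reconstructed D_a)
 *
 * The SSSE3 version below computes the same thing 16 (32 on x86_64)
 * bytes at a time, with the GF multiplies done as pshufb nibble lookups.
 */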
static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
				    int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);
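
	/* dp and dq now hold the P/Q syndromes computed with the failed
	   blocks zeroed, so p ^ dp and q ^ dq isolate the contributions
	   of the missing blocks. */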

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
					 raid6_gfexp[failb]]];
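
	/* Each raid6_vgfmul[c] row is 32 bytes: the GF products of c with
	   all 16 low-nibble values, then with all 16 high-nibble values.
	   A full byte multiply is two pshufb lookups XORed together. */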

	kernel_fpu_begin();
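
	/* xmm7 = 0x0f in every byte; used below to mask pshufb indices
	   down to a single nibble. */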
	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif
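
	/*
	 * On x86_64, three of the four 16-byte table halves stay resident
	 * in xmm6/xmm14/xmm15 and the loop handles 32 bytes per iteration
	 * ("ssse3x2"); 32-bit mode only has xmm0-xmm7, so the tables are
	 * reloaded and 16 bytes processed per iteration ("ssse3x1").
	 */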

	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6, xmm14, xmm15 */
		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor %0,%%xmm8" : : "m" (dp[16]));
		/* xmm0/8 = px */
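
		/* GF multiply (q ^ dq) by qmul, one nibble at a time:
		   pand keeps the low nibbles, psraw $4 + pand moves the
		   high nibbles into index position, and each pshufb uses
		   the nibbles to index a 16-entry product table; XORing
		   the two lookups yields the full byte product. */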
		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw $4,%xmm1");
		asm volatile("psraw $4,%xmm9");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pand %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor %xmm4,%xmm5");
		asm volatile("pxor %xmm12,%xmm13");

		/* xmm5/13 = qx */

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw $4,%xmm2");
		asm volatile("psraw $4,%xmm10");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pand %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor %xmm4,%xmm1");
		asm volatile("pxor %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		asm volatile("pxor %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));
		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("pxor %xmm9,%xmm8");
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor %0,%%xmm0" : : "m" (*dp));

		/* 1 = dq ^ q
		 * 0 = dp ^ p
		 */
		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw $4,%xmm1");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

		/* xmm5 = qx */

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw $4,%xmm2");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}
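
/*
 * Data+P recovery.  With data block D_a and P both lost, Q survives.
 * Regenerating the syndrome with D_a zeroed gives Q_a (and rewrites the
 * p buffer in place with P_a, P computed without the failed block), so
 *
 *	Q ^ Q_a = g^a * D_a   =>   D_a = g^-a * (Q ^ Q_a)
 *	P = P_a ^ D_a
 *
 * which per byte is roughly the scalar loop of lib/raid6/recov.c:
 *
 *	*p++ ^= *dq = qmul[*q ^ *dq];
 */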
static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
				    void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);
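
	/* p has just been overwritten in place with P_a (P computed with
	   the failed block zeroed); dq holds Q_a. */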

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
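
	/* qmul multiplies by g^-faila, cancelling the failed block's
	   coefficient in Q. */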

	kernel_fpu_begin();

	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

		/* xmm3 = q[0] ^ dq[0] */

		asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm4 = q[16] ^ dq[16] */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %xmm4, %xmm8");

		/* xmm4 = xmm8 = q[16] ^ dq[16] */

		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
		asm volatile("pxor %xmm0, %xmm1");
		asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

		/* xmm1 = qmul[q[0] ^ dq[0]] */

		asm volatile("psraw $4, %xmm4");
		asm volatile("pand %xmm7, %xmm8");
		asm volatile("pand %xmm7, %xmm4");
		asm volatile("pshufb %xmm8, %xmm10");
		asm volatile("pshufb %xmm4, %xmm11");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("pxor %xmm10, %xmm11");
		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

		/* xmm11 = qmul[q[16] ^ dq[16]] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

		asm volatile("pxor %xmm11, %xmm12");

		/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;
#else
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm3 = *q ^ *dq */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("pxor %xmm0, %xmm1");

		/* xmm1 = qmul[*q ^ *dq] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = *p ^ qmul[*q ^ *dq] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}
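
/*
 * Exported descriptor: at init time the raid6 core walks the available
 * recovery implementations and keeps the highest-priority entry whose
 * ->valid() check passes (see lib/raid6/algos.c).
 */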
const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};