  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * arch/arm/include/asm/xor.h
  4. *
  5. * Copyright (C) 2001 Russell King
  6. */
  7. #include <linux/hardirq.h>
  8. #include <asm-generic/xor.h>
  9. #include <asm/hwcap.h>
  10. #include <asm/neon.h>
/* XOR one source word into one accumulator word. */
#define __XOR(a1, a2) a1 ^= a2

/*
 * Load two words from dst into a1/a2.  Plain ldmia (no '!' writeback):
 * dst keeps its value so PUT_BLOCK_2() can store back to the same place.
 */
#define GET_BLOCK_2(dst) \
	__asm__("ldmia %0, {%1, %2}" \
	: "=r" (dst), "=r" (a1), "=r" (a2) \
	: "0" (dst))

/* Load four words from dst into a1..a4 (no writeback, as above). */
#define GET_BLOCK_4(dst) \
	__asm__("ldmia %0, {%1, %2, %3, %4}" \
	: "=r" (dst), "=r" (a1), "=r" (a2), "=r" (a3), "=r" (a4) \
	: "0" (dst))

/*
 * Load two words from src into b1/b2, advancing src (ldmia writeback),
 * then XOR them into the a1/a2 accumulators.
 */
#define XOR_BLOCK_2(src) \
	__asm__("ldmia %0!, {%1, %2}" \
	: "=r" (src), "=r" (b1), "=r" (b2) \
	: "0" (src)); \
	__XOR(a1, b1); __XOR(a2, b2);

/* Four-word variant: load b1..b4 from src (advancing it), XOR into a1..a4. */
#define XOR_BLOCK_4(src) \
	__asm__("ldmia %0!, {%1, %2, %3, %4}" \
	: "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
	: "0" (src)); \
	__XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)

/*
 * Store a1/a2 to dst, advancing dst (stmia writeback).  __volatile__
 * because the asm's only effect visible to the compiler is through
 * memory, so it must not be optimised away or reordered.
 */
#define PUT_BLOCK_2(dst) \
	__asm__ __volatile__("stmia %0!, {%2, %3}" \
	: "=r" (dst) \
	: "0" (dst), "r" (a1), "r" (a2))

/* Four-word variant of PUT_BLOCK_2(). */
#define PUT_BLOCK_4(dst) \
	__asm__ __volatile__("stmia %0!, {%2, %3, %4, %5}" \
	: "=r" (dst) \
	: "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4))
/*
 * p1 ^= p2, four words (16 bytes) per iteration using ldm/stm
 * multiple-register transfers.  The do/while runs at least once and
 * there is no tail handling, so bytes must be a nonzero multiple of
 * 4 * sizeof(unsigned long).
 */
static void
xor_arm4regs_2(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2)
{
	unsigned int lines = bytes / sizeof(unsigned long) / 4;
	/*
	 * ldm/stm transfer registers in ascending register-number order,
	 * so the accumulators (a1..a4) and source words (b1..b4) are
	 * pinned to fixed, ascending registers to match the asm templates.
	 */
	register unsigned int a1 __asm__("r4");
	register unsigned int a2 __asm__("r5");
	register unsigned int a3 __asm__("r6");
	register unsigned int a4 __asm__("r7");
	register unsigned int b1 __asm__("r8");
	register unsigned int b2 __asm__("r9");
	register unsigned int b3 __asm__("ip");
	register unsigned int b4 __asm__("lr");

	do {
		GET_BLOCK_4(p1);	/* a1..a4 = p1[0..3]          */
		XOR_BLOCK_4(p2);	/* a1..a4 ^= p2[0..3], p2 += 4 */
		PUT_BLOCK_4(p1);	/* p1[0..3] = a1..a4, p1 += 4  */
	} while (--lines);
}
/*
 * p1 ^= p2 ^ p3, four words (16 bytes) per iteration.  Same register
 * layout and alignment/size requirements as xor_arm4regs_2(): bytes
 * must be a nonzero multiple of 4 * sizeof(unsigned long).
 */
static void
xor_arm4regs_3(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3)
{
	unsigned int lines = bytes / sizeof(unsigned long) / 4;
	/* Registers pinned for the ldm/stm asm templates (see _2 variant). */
	register unsigned int a1 __asm__("r4");
	register unsigned int a2 __asm__("r5");
	register unsigned int a3 __asm__("r6");
	register unsigned int a4 __asm__("r7");
	register unsigned int b1 __asm__("r8");
	register unsigned int b2 __asm__("r9");
	register unsigned int b3 __asm__("ip");
	register unsigned int b4 __asm__("lr");

	do {
		GET_BLOCK_4(p1);	/* load accumulators from p1       */
		XOR_BLOCK_4(p2);	/* fold in p2, advancing it        */
		XOR_BLOCK_4(p3);	/* fold in p3, advancing it        */
		PUT_BLOCK_4(p1);	/* store result, advancing p1      */
	} while (--lines);
}
/*
 * p1 ^= p2 ^ p3 ^ p4, two words (8 bytes) per iteration.  Only two
 * accumulators are used here — presumably to leave registers free for
 * the extra source pointers (TODO confirm).  bytes must be a nonzero
 * multiple of 2 * sizeof(unsigned long).
 */
static void
xor_arm4regs_4(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3,
		const unsigned long * __restrict p4)
{
	unsigned int lines = bytes / sizeof(unsigned long) / 2;
	/* Pinned, ascending registers for the two-word ldm/stm templates. */
	register unsigned int a1 __asm__("r8");
	register unsigned int a2 __asm__("r9");
	register unsigned int b1 __asm__("ip");
	register unsigned int b2 __asm__("lr");

	do {
		GET_BLOCK_2(p1);	/* a1/a2 = p1[0..1]           */
		XOR_BLOCK_2(p2);	/* fold in p2, advancing it   */
		XOR_BLOCK_2(p3);	/* fold in p3, advancing it   */
		XOR_BLOCK_2(p4);	/* fold in p4, advancing it   */
		PUT_BLOCK_2(p1);	/* store back, advancing p1   */
	} while (--lines);
}
/*
 * p1 ^= p2 ^ p3 ^ p4 ^ p5, two words (8 bytes) per iteration.  Same
 * register layout and size requirement as xor_arm4regs_4(): bytes must
 * be a nonzero multiple of 2 * sizeof(unsigned long).
 */
static void
xor_arm4regs_5(unsigned long bytes, unsigned long * __restrict p1,
		const unsigned long * __restrict p2,
		const unsigned long * __restrict p3,
		const unsigned long * __restrict p4,
		const unsigned long * __restrict p5)
{
	unsigned int lines = bytes / sizeof(unsigned long) / 2;
	/* Pinned, ascending registers for the two-word ldm/stm templates. */
	register unsigned int a1 __asm__("r8");
	register unsigned int a2 __asm__("r9");
	register unsigned int b1 __asm__("ip");
	register unsigned int b2 __asm__("lr");

	do {
		GET_BLOCK_2(p1);	/* a1/a2 = p1[0..1]           */
		XOR_BLOCK_2(p2);	/* fold in p2, advancing it   */
		XOR_BLOCK_2(p3);	/* fold in p3, advancing it   */
		XOR_BLOCK_2(p4);	/* fold in p4, advancing it   */
		XOR_BLOCK_2(p5);	/* fold in p5, advancing it   */
		PUT_BLOCK_2(p1);	/* store back, advancing p1   */
	} while (--lines);
}
/*
 * Template wiring the ldm/stm routines above into the generic XOR
 * framework; benchmarked against the generic templates via
 * XOR_TRY_TEMPLATES below.
 */
static struct xor_block_template xor_block_arm4regs = {
	.name = "arm4regs",
	.do_2 = xor_arm4regs_2,
	.do_3 = xor_arm4regs_3,
	.do_4 = xor_arm4regs_4,
	.do_5 = xor_arm4regs_5,
};
/*
 * Replace the generic template list (from <asm-generic/xor.h>) with one
 * that also tries the arm4regs and (if configured) NEON implementations;
 * xor_speed() benchmarks each candidate so the fastest can be chosen.
 */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
	do { \
		xor_speed(&xor_block_arm4regs); \
		xor_speed(&xor_block_8regs); \
		xor_speed(&xor_block_32regs); \
		NEON_TEMPLATES; \
	} while (0)
  133. #ifdef CONFIG_KERNEL_MODE_NEON
  134. extern struct xor_block_template const xor_block_neon_inner;
  135. static void
  136. xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
  137. const unsigned long * __restrict p2)
  138. {
  139. if (in_interrupt()) {
  140. xor_arm4regs_2(bytes, p1, p2);
  141. } else {
  142. kernel_neon_begin();
  143. xor_block_neon_inner.do_2(bytes, p1, p2);
  144. kernel_neon_end();
  145. }
  146. }
  147. static void
  148. xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
  149. const unsigned long * __restrict p2,
  150. const unsigned long * __restrict p3)
  151. {
  152. if (in_interrupt()) {
  153. xor_arm4regs_3(bytes, p1, p2, p3);
  154. } else {
  155. kernel_neon_begin();
  156. xor_block_neon_inner.do_3(bytes, p1, p2, p3);
  157. kernel_neon_end();
  158. }
  159. }
  160. static void
  161. xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
  162. const unsigned long * __restrict p2,
  163. const unsigned long * __restrict p3,
  164. const unsigned long * __restrict p4)
  165. {
  166. if (in_interrupt()) {
  167. xor_arm4regs_4(bytes, p1, p2, p3, p4);
  168. } else {
  169. kernel_neon_begin();
  170. xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
  171. kernel_neon_end();
  172. }
  173. }
  174. static void
  175. xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
  176. const unsigned long * __restrict p2,
  177. const unsigned long * __restrict p3,
  178. const unsigned long * __restrict p4,
  179. const unsigned long * __restrict p5)
  180. {
  181. if (in_interrupt()) {
  182. xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
  183. } else {
  184. kernel_neon_begin();
  185. xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
  186. kernel_neon_end();
  187. }
  188. }
  189. static struct xor_block_template xor_block_neon = {
  190. .name = "neon",
  191. .do_2 = xor_neon_2,
  192. .do_3 = xor_neon_3,
  193. .do_4 = xor_neon_4,
  194. .do_5 = xor_neon_5
  195. };
/* Benchmark the NEON template only when the CPU actually has NEON. */
#define NEON_TEMPLATES \
	do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
#else
/* No kernel-mode NEON support: nothing extra to try. */
#define NEON_TEMPLATES
#endif