/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains assembly-language implementations
 * of IP-style 1's complement checksum routines.
 *
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Severely hacked about by Paul Mackerras ([email protected]).
 */

#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>

	.text

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * __csum_partial(buff, len, sum)
 */
_GLOBAL(__csum_partial)
	subi	r3,r3,4
	srawi.	r6,r4,2		/* Divide len by 4 and also clear carry */
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r0,r3,2		/* Align buffer to longword boundary */
	beq+	1f
	lhz	r0,4(r3)	/* do 2 bytes to get aligned */
	subi	r4,r4,2
	addi	r3,r3,2
	srwi.	r6,r4,2		/* # words to do */
	adde	r5,r5,r0
	beq	3f
1:	andi.	r6,r6,3		/* Prepare to handle words 4 by 4 */
	beq	21f
	mtctr	r6
2:	lwzu	r0,4(r3)
	adde	r5,r5,r0
	bdnz	2b
21:	srwi.	r6,r4,4		/* # blocks of 4 words to do */
	beq	3f
	lwz	r0,4(r3)
	mtctr	r6
	lwz	r6,8(r3)
	adde	r5,r5,r0
	lwz	r7,12(r3)
	adde	r5,r5,r6
	lwzu	r8,16(r3)
	adde	r5,r5,r7
	bdz	23f
22:	lwz	r0,4(r3)
	adde	r5,r5,r8
	lwz	r6,8(r3)
	adde	r5,r5,r0
	lwz	r7,12(r3)
	adde	r5,r5,r6
	lwzu	r8,16(r3)
	adde	r5,r5,r7
	bdnz	22b
23:	adde	r5,r5,r8
3:	andi.	r0,r4,2
	beq+	4f
	lhz	r0,4(r3)
	addi	r3,r3,2
	adde	r5,r5,r0
4:	andi.	r0,r4,1
	beq+	5f
	lbz	r0,4(r3)
	slwi	r0,r0,8		/* Upper byte of word */
	adde	r5,r5,r0
5:	addze	r3,r5		/* add in final carry */
	blr
EXPORT_SYMBOL(__csum_partial)
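
/*
 * For reference, a minimal C sketch of what __csum_partial computes,
 * assuming a big-endian CPU (as on these 32-bit parts) and the kernel's
 * u8/u32/u64 types; the function name is illustrative only.  The asm
 * gets the same effect with the adde carry chain instead of a 64-bit
 * accumulator:
 *
 *	u32 csum_partial_ref(const void *buff, int len, u32 sum)
 *	{
 *		const u8 *p = buff;
 *		u64 acc = sum;
 *
 *		for (; len >= 4; p += 4, len -= 4)	// lwz/adde word loop
 *			acc += ((u32)p[0] << 24) | ((u32)p[1] << 16) |
 *			       ((u32)p[2] << 8) | p[3];
 *		if (len >= 2) {				// trailing halfword (lhz)
 *			acc += ((u32)p[0] << 8) | p[1];
 *			p += 2;
 *		}
 *		if (len & 1)				// trailing byte, upper half
 *			acc += (u32)p[0] << 8;
 *		while (acc >> 32)			// fold carries back in (addze)
 *			acc = (u32)acc + (acc >> 32);
 *		return (u32)acc;
 *	}
 */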

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in 0xffffffff, while copying the block to dst.
 * If an access exception occurs it returns zero.
 *
 * csum_partial_copy_generic(src, dst, len)
 */
#define CSUM_COPY_16_BYTES_WITHEX(n)	\
8 ## n ## 0:				\
	lwz	r7,4(r4);		\
8 ## n ## 1:				\
	lwz	r8,8(r4);		\
8 ## n ## 2:				\
	lwz	r9,12(r4);		\
8 ## n ## 3:				\
	lwzu	r10,16(r4);		\
8 ## n ## 4:				\
	stw	r7,4(r6);		\
	adde	r12,r12,r7;		\
8 ## n ## 5:				\
	stw	r8,8(r6);		\
	adde	r12,r12,r8;		\
8 ## n ## 6:				\
	stw	r9,12(r6);		\
	adde	r12,r12,r9;		\
8 ## n ## 7:				\
	stwu	r10,16(r6);		\
	adde	r12,r12,r10

#define CSUM_COPY_16_BYTES_EXCODE(n)		\
	EX_TABLE(8 ## n ## 0b, fault);		\
	EX_TABLE(8 ## n ## 1b, fault);		\
	EX_TABLE(8 ## n ## 2b, fault);		\
	EX_TABLE(8 ## n ## 3b, fault);		\
	EX_TABLE(8 ## n ## 4b, fault);		\
	EX_TABLE(8 ## n ## 5b, fault);		\
	EX_TABLE(8 ## n ## 6b, fault);		\
	EX_TABLE(8 ## n ## 7b, fault);
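
/*
 * The two macros above come in pairs: CSUM_COPY_16_BYTES_WITHEX(n)
 * places a numbered local label (8n0 through 8n7) on each load and
 * store in one 16-byte chunk, and CSUM_COPY_16_BYTES_EXCODE(n) emits
 * the matching exception-table entries so that a fault at any of
 * those instructions transfers control to the fault: stub below,
 * making csum_partial_copy_generic return 0.
 */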

	.text

CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)

_GLOBAL(csum_partial_copy_generic)
	li	r12,-1
	addic	r0,r0,0			/* clear carry */
	addi	r6,r4,-4
	neg	r0,r4
	addi	r4,r3,-4
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	crset	4*cr7+eq
	beq	58f

	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */
	rlwinm	r7,r6,3,0x8
	rlwnm	r12,r12,r7,0,31		/* odd destination address: rotate one byte */
	cmplwi	cr7,r7,0		/* is destination address even ? */
	andi.	r8,r0,3			/* get it word-aligned first */
	mtctr	r8
	beq+	61f
	li	r3,0
70:	lbz	r9,4(r4)		/* do some bytes */
	addi	r4,r4,1
	slwi	r3,r3,8
	rlwimi	r3,r9,0,24,31
71:	stb	r9,4(r6)
	addi	r6,r6,1
	bdnz	70b
	adde	r12,r12,r3
61:	subf	r5,r0,r5
	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)		/* do some words */
	adde	r12,r12,r9
73:	stwu	r9,4(r6)
	bdnz	72b

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	beq	63f

	/* Here we decide how far ahead to prefetch the source */
	li	r3,4
	cmpwi	r0,1
	li	r7,0
	ble	114f
	li	r7,1
#if MAX_COPY_PREFETCH > 1
	/* Heuristically, for large transfers we prefetch
	   MAX_COPY_PREFETCH cachelines ahead.  For small transfers
	   we prefetch 1 cacheline ahead. */
	cmpwi	r0,MAX_COPY_PREFETCH
	ble	112f
	li	r7,MAX_COPY_PREFETCH
112:	mtctr	r7
111:	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
	bdnz	111b
#else
	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */

114:	subf	r8,r7,r0
	mr	r0,r7
	mtctr	r8

53:	dcbt	r3,r4
54:	dcbz	r11,r6
	/* the main body of the cacheline loop */
	CSUM_COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
	CSUM_COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_BYTES >= 64
	CSUM_COPY_16_BYTES_WITHEX(2)
	CSUM_COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_BYTES >= 128
	CSUM_COPY_16_BYTES_WITHEX(4)
	CSUM_COPY_16_BYTES_WITHEX(5)
	CSUM_COPY_16_BYTES_WITHEX(6)
	CSUM_COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
	bdnz	53b
	cmpwi	r0,0
	li	r3,4
	li	r7,0
	bne	114b

63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
	adde	r12,r12,r0
31:	stwu	r0,4(r6)
	bdnz	30b
64:	andi.	r0,r5,2
	beq+	65f
40:	lhz	r0,4(r4)
	addi	r4,r4,2
41:	sth	r0,4(r6)
	adde	r12,r12,r0
	addi	r6,r6,2
65:	andi.	r0,r5,1
	beq+	66f
50:	lbz	r0,4(r4)
51:	stb	r0,4(r6)
	slwi	r0,r0,8
	adde	r12,r12,r0
66:	addze	r3,r12
	beqlr+	cr7
	rlwinm	r3,r3,8,0,31	/* odd destination address: rotate one byte */
	blr

fault:
	li	r3,0
	blr

	EX_TABLE(70b, fault);
	EX_TABLE(71b, fault);
	EX_TABLE(72b, fault);
	EX_TABLE(73b, fault);
	EX_TABLE(54b, fault);
/*
 * this stuff attaches exception-table entries to the loads and stores
 * in the cacheline loop above, so that a fault in either the read or
 * the write part branches to fault
 */
	CSUM_COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
	CSUM_COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_BYTES >= 64
	CSUM_COPY_16_BYTES_EXCODE(2)
	CSUM_COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_BYTES >= 128
	CSUM_COPY_16_BYTES_EXCODE(4)
	CSUM_COPY_16_BYTES_EXCODE(5)
	CSUM_COPY_16_BYTES_EXCODE(6)
	CSUM_COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif

	EX_TABLE(30b, fault);
	EX_TABLE(31b, fault);
	EX_TABLE(40b, fault);
	EX_TABLE(41b, fault);
	EX_TABLE(50b, fault);
	EX_TABLE(51b, fault);

EXPORT_SYMBOL(csum_partial_copy_generic)
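
/*
 * In C terms, the successful path of csum_partial_copy_generic is
 * roughly the sketch below (big-endian CPU assumed, kernel u8/u32/u64
 * types, illustrative name).  The real routine additionally rotates
 * the 0xffffffff seed by one byte for odd destination addresses and
 * relies on the exception table to turn a faulting access into a
 * 0 return:
 *
 *	u32 csum_copy_ref(const void *src, void *dst, int len)
 *	{
 *		const u8 *s = src;
 *		u8 *d = dst;
 *		u64 acc = 0xffffffff;		// li r12,-1 seed
 *
 *		for (; len >= 4; s += 4, d += 4, len -= 4) {
 *			acc += ((u32)s[0] << 24) | ((u32)s[1] << 16) |
 *			       ((u32)s[2] << 8) | s[3];
 *			d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
 *		}
 *		if (len >= 2) {			// trailing halfword
 *			acc += ((u32)s[0] << 8) | s[1];
 *			d[0] = s[0]; d[1] = s[1];
 *			s += 2; d += 2;
 *		}
 *		if (len & 1) {			// trailing byte, upper half
 *			acc += (u32)s[0] << 8;
 *			d[0] = s[0];
 *		}
 *		while (acc >> 32)		// fold carries back in
 *			acc = (u32)acc + (acc >> 32);
 *		return (u32)acc;		// the asm returns 0 on a fault
 *	}
 */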

/*
 * __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 *			   const struct in6_addr *daddr,
 *			   __u32 len, __u8 proto, __wsum sum)
 */
_GLOBAL(csum_ipv6_magic)
	lwz	r8, 0(r3)
	lwz	r9, 4(r3)
	addc	r0, r7, r8
	lwz	r10, 8(r3)
	adde	r0, r0, r9
	lwz	r11, 12(r3)
	adde	r0, r0, r10
	lwz	r8, 0(r4)
	adde	r0, r0, r11
	lwz	r9, 4(r4)
	adde	r0, r0, r8
	lwz	r10, 8(r4)
	adde	r0, r0, r9
	lwz	r11, 12(r4)
	adde	r0, r0, r10
	add	r5, r5, r6	/* assumption: len + proto doesn't carry */
	adde	r0, r0, r11
	adde	r0, r0, r5
	addze	r0, r0
	rotlwi	r3, r0, 16
	add	r3, r0, r3
	not	r3, r3
	rlwinm	r3, r3, 16, 16, 31
	blr
EXPORT_SYMBOL(csum_ipv6_magic)
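
/*
 * The final four instructions fold the 32-bit 1's complement sum in
 * r0 down to the 16-bit result, equivalent to this C sketch (rol32 is
 * the kernel's rotate-left helper; the name csum32_to_16 is
 * illustrative):
 *
 *	u16 csum32_to_16(u32 acc)
 *	{
 *		u32 t = acc + rol32(acc, 16);	// rotlwi + add: the upper half
 *						// of t now holds high16 + low16
 *						// plus the carry between them
 *		return ~t >> 16;		// not + rlwinm: invert, keep
 *						// the upper half
 *	}
 */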