checksum_32.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              IP/TCP/UDP checksumming routines
 *
 * Authors:     Jorge Cwik, <[email protected]>
 *              Arnt Gulbrandsen, <[email protected]>
 *              Tom May, <[email protected]>
 *              Pentium Pro/II routines:
 *              Alexander Kjeldaas <[email protected]>
 *              Finn Arne Gangstad <[email protected]>
 *              Lots of code moved from tcp.c and ip.c; see those files
 *              for more names.
 *
 * Changes:     Ingo Molnar, converted csum_partial_copy() to 2.1 exception
 *                           handling.
 *              Andi Kleen, add zeroing on error
 *                          converted to pure assembler
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

/*
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
 */
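
/*
 * For reference, a minimal C sketch of the ones'-complement accumulation
 * performed below (illustrative only: the helper name csum_partial_ref is
 * made up, kernel integer types are assumed, and the byte-rotate applied
 * when buff starts on an odd address is left out).  It agrees with the
 * assembly once csum_fold() reduces the result to 16 bits.
 *
 *      static u32 csum_partial_ref(const u8 *buff, int len, u32 sum)
 *      {
 *              u64 acc = sum;
 *
 *              while (len >= 2) {              // sum the data as 16-bit words
 *                      acc += *(const u16 *)buff;
 *                      buff += 2;
 *                      len -= 2;
 *              }
 *              if (len)                        // trailing odd byte
 *                      acc += *buff;
 *              while (acc >> 32)               // wrap carries back into the sum
 *                      acc = (acc & 0xffffffff) + (acc >> 32);
 *              return (u32)acc;
 *      }
 */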
.text

#ifndef CONFIG_X86_USE_PPRO_CHECKSUM

/*
 * Experiments with Ethernet and SLIP connections show that buff
 * is aligned on either a 2-byte or 4-byte boundary. We get at
 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 * alignment for the unrolled loop.
 */
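
/*
 * Flow of the function below: a single odd byte and then a single 16-bit
 * word are consumed up front, so the unrolled 32-byte loop always runs on
 * a 4-byte-aligned pointer; the remaining 0-31 bytes are mopped up by the
 * short dword loop and the 1-3 byte tail that follow it.
 */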
SYM_FUNC_START(csum_partial)
        pushl %esi
        pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: unsigned char *buff
        testl $3, %esi          # Check alignment.
        jz 2f                   # Jump if alignment is ok.
        testl $1, %esi          # Check alignment.
        jz 10f                  # Jump if alignment is boundary of 2 bytes.

        # buf is odd
        dec %ecx
        jl 8f
        movzbl (%esi), %ebx
        adcl %ebx, %eax
        roll $8, %eax
        inc %esi
        testl $2, %esi
        jz 2f
10:
        subl $2, %ecx           # Alignment uses up two bytes.
        jae 1f                  # Jump if we had at least two bytes.
        addl $2, %ecx           # ecx was < 2. Deal with it.
        jmp 4f
1:      movw (%esi), %bx
        addl $2, %esi
        addw %bx, %ax
        adcl $0, %eax
2:
        movl %ecx, %edx
        shrl $5, %ecx
        jz 2f
        testl %esi, %esi
1:      movl (%esi), %ebx
        adcl %ebx, %eax
        movl 4(%esi), %ebx
        adcl %ebx, %eax
        movl 8(%esi), %ebx
        adcl %ebx, %eax
        movl 12(%esi), %ebx
        adcl %ebx, %eax
        movl 16(%esi), %ebx
        adcl %ebx, %eax
        movl 20(%esi), %ebx
        adcl %ebx, %eax
        movl 24(%esi), %ebx
        adcl %ebx, %eax
        movl 28(%esi), %ebx
        adcl %ebx, %eax
        lea 32(%esi), %esi
        dec %ecx
        jne 1b
        adcl $0, %eax
2:      movl %edx, %ecx
        andl $0x1c, %edx
        je 4f
        shrl $2, %edx           # This clears CF
3:      adcl (%esi), %eax
        lea 4(%esi), %esi
        dec %edx
        jne 3b
        adcl $0, %eax
4:      andl $3, %ecx
        jz 7f
        cmpl $2, %ecx
        jb 5f
        movw (%esi),%cx
        leal 2(%esi),%esi
        je 6f
        shll $16,%ecx
5:      movb (%esi),%cl
6:      addl %ecx,%eax
        adcl $0, %eax
7:
        testb $1, 12(%esp)
        jz 8f
        roll $8, %eax
8:
        popl %ebx
        popl %esi
        RET
SYM_FUNC_END(csum_partial)

#else

/* Version for PentiumII/PPro */

SYM_FUNC_START(csum_partial)
        pushl %esi
        pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: const unsigned char *buf
        testl $3, %esi
        jnz 25f
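
        # Dispatch into the unrolled block: %ecx will count 128-byte blocks
        # and %ebx holds len & 0x7c, the leftover whole dwords in bytes.
        # Each "adcl disp8(%esi), %eax" below assembles to 3 bytes, so
        # jumping 3*(%ebx/4) bytes back from 45: executes exactly that many
        # adds on the first pass; the testl merely clears CF for the chain.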
10:
        movl %ecx, %edx
        movl %ecx, %ebx
        andl $0x7c, %ebx
        shrl $7, %ecx
        addl %ebx,%esi
        shrl $2, %ebx
        negl %ebx
        lea 45f(%ebx,%ebx,2), %ebx
        testl %esi, %esi
        JMP_NOSPEC ebx

        # Handle 2-byte-aligned regions
20:     addw (%esi), %ax
        lea 2(%esi), %esi
        adcl $0, %eax
        jmp 10b
25:
        testl $1, %esi
        jz 30f
        # buf is odd
        dec %ecx
        jl 90f
        movzbl (%esi), %ebx
        addl %ebx, %eax
        adcl $0, %eax
        roll $8, %eax
        inc %esi
        testl $2, %esi
        jz 10b

30:     subl $2, %ecx
        ja 20b
        je 32f
        addl $2, %ecx
        jz 80f
        movzbl (%esi),%ebx      # csumming 1 byte, 2-aligned
        addl %ebx, %eax
        adcl $0, %eax
        jmp 80f
32:
        addw (%esi), %ax        # csumming 2 bytes, 2-aligned
        adcl $0, %eax
        jmp 80f

40:
        addl -128(%esi), %eax
        adcl -124(%esi), %eax
        adcl -120(%esi), %eax
        adcl -116(%esi), %eax
        adcl -112(%esi), %eax
        adcl -108(%esi), %eax
        adcl -104(%esi), %eax
        adcl -100(%esi), %eax
        adcl -96(%esi), %eax
        adcl -92(%esi), %eax
        adcl -88(%esi), %eax
        adcl -84(%esi), %eax
        adcl -80(%esi), %eax
        adcl -76(%esi), %eax
        adcl -72(%esi), %eax
        adcl -68(%esi), %eax
        adcl -64(%esi), %eax
        adcl -60(%esi), %eax
        adcl -56(%esi), %eax
        adcl -52(%esi), %eax
        adcl -48(%esi), %eax
        adcl -44(%esi), %eax
        adcl -40(%esi), %eax
        adcl -36(%esi), %eax
        adcl -32(%esi), %eax
        adcl -28(%esi), %eax
        adcl -24(%esi), %eax
        adcl -20(%esi), %eax
        adcl -16(%esi), %eax
        adcl -12(%esi), %eax
        adcl -8(%esi), %eax
        adcl -4(%esi), %eax
45:
        lea 128(%esi), %esi
        adcl $0, %eax
        dec %ecx
        jge 40b
        movl %edx, %ecx
50:     andl $3, %ecx
        jz 80f

        # Handle the last 1-3 bytes without jumping
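        # (%ebx below becomes 0xff, 0xffff or 0xffffff for 1, 2 or 3 trailing
        #  bytes; -128(%esi) is the first byte that has not been summed yet.)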
        notl %ecx               # 1->2, 2->1, 3->0, higher bits are masked
        movl $0xffffff,%ebx     # by the shll and shrl instructions
        shll $3,%ecx
        shrl %cl,%ebx
        andl -128(%esi),%ebx    # esi is 4-aligned so should be ok
        addl %ebx,%eax
        adcl $0,%eax
80:
        testb $1, 12(%esp)
        jz 90f
        roll $8, %eax
90:
        popl %ebx
        popl %esi
        RET
SYM_FUNC_END(csum_partial)
#endif
EXPORT_SYMBOL(csum_partial)

/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
                                        int len)
 */

/*
 * Copy from ds while checksumming, otherwise like csum_partial
 */
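
/*
 * For reference, a rough C sketch of what this routine computes when no
 * access faults (illustrative only: csum_copy_ref and csum_partial_ref are
 * made-up names, and the real code interleaves the copy with the checksum
 * additions rather than calling memcpy()).  The sum is seeded with ~0; if
 * a wrapped access faults, the exception fixup below makes the function
 * return 0 instead.
 *
 *      static u32 csum_copy_ref(const u8 *src, u8 *dst, int len)
 *      {
 *              memcpy(dst, src, len);
 *              return csum_partial_ref(src, len, ~0U);
 *      }
 */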
#define EXC(y...)                       \
        9999: y;                        \
        _ASM_EXTABLE_TYPE(9999b, 7f, EX_TYPE_UACCESS | EX_FLAG_CLEAR_AX)
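
/*
 * Every EXC()-wrapped access gets an exception table entry: if the access
 * faults, execution resumes at the local label 7 (the common exit path)
 * and the EX_FLAG_CLEAR_AX fixup zeroes %eax first, so the function
 * returns 0 rather than a checksum.
 */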
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM

#define ARGBASE 16
#define FP      12

SYM_FUNC_START(csum_partial_copy_generic)
        subl $4,%esp
        pushl %edi
        pushl %esi
        pushl %ebx
        movl ARGBASE+12(%esp),%ecx      # len
        movl ARGBASE+4(%esp),%esi       # src
        movl ARGBASE+8(%esp),%edi       # dst

        movl $-1, %eax                  # sum
        testl $2, %edi                  # Check alignment.
        jz 2f                           # Jump if alignment is ok.
        subl $2, %ecx                   # Alignment uses up two bytes.
        jae 1f                          # Jump if we had at least two bytes.
        addl $2, %ecx                   # ecx was < 2. Deal with it.
        jmp 4f
EXC(1:  movw (%esi), %bx        )
        addl $2, %esi
EXC(    movw %bx, (%edi)        )
        addl $2, %edi
        addw %bx, %ax
        adcl $0, %eax
2:
        movl %ecx, FP(%esp)
        shrl $5, %ecx
        jz 2f
        testl %esi, %esi                # what's wrong with clc?
EXC(1:  movl (%esi), %ebx       )
EXC(    movl 4(%esi), %edx      )
        adcl %ebx, %eax
EXC(    movl %ebx, (%edi)       )
        adcl %edx, %eax
EXC(    movl %edx, 4(%edi)      )
EXC(    movl 8(%esi), %ebx      )
EXC(    movl 12(%esi), %edx     )
        adcl %ebx, %eax
EXC(    movl %ebx, 8(%edi)      )
        adcl %edx, %eax
EXC(    movl %edx, 12(%edi)     )
EXC(    movl 16(%esi), %ebx     )
EXC(    movl 20(%esi), %edx     )
        adcl %ebx, %eax
EXC(    movl %ebx, 16(%edi)     )
        adcl %edx, %eax
EXC(    movl %edx, 20(%edi)     )
EXC(    movl 24(%esi), %ebx     )
EXC(    movl 28(%esi), %edx     )
        adcl %ebx, %eax
EXC(    movl %ebx, 24(%edi)     )
        adcl %edx, %eax
EXC(    movl %edx, 28(%edi)     )
        lea 32(%esi), %esi
        lea 32(%edi), %edi
        dec %ecx
        jne 1b
        adcl $0, %eax
2:      movl FP(%esp), %edx
        movl %edx, %ecx
        andl $0x1c, %edx
        je 4f
        shrl $2, %edx                   # This clears CF
EXC(3:  movl (%esi), %ebx       )
        adcl %ebx, %eax
EXC(    movl %ebx, (%edi)       )
        lea 4(%esi), %esi
        lea 4(%edi), %edi
        dec %edx
        jne 3b
        adcl $0, %eax
4:      andl $3, %ecx
        jz 7f
        cmpl $2, %ecx
        jb 5f
EXC(    movw (%esi), %cx        )
        leal 2(%esi), %esi
EXC(    movw %cx, (%edi)        )
        leal 2(%edi), %edi
        je 6f
        shll $16,%ecx
EXC(5:  movb (%esi), %cl        )
EXC(    movb %cl, (%edi)        )
6:      addl %ecx, %eax
        adcl $0, %eax
7:
        popl %ebx
        popl %esi
        popl %edi
        popl %ecx                       # equivalent to addl $4,%esp
        RET
SYM_FUNC_END(csum_partial_copy_generic)

#else

/* Version for PentiumII/PPro */

#define ROUND1(x) \
        EXC(movl x(%esi), %ebx) ;       \
        addl %ebx, %eax         ;       \
        EXC(movl %ebx, x(%edi)) ;

#define ROUND(x) \
        EXC(movl x(%esi), %ebx) ;       \
        adcl %ebx, %eax         ;       \
        EXC(movl %ebx, x(%edi)) ;

#define ARGBASE 12

SYM_FUNC_START(csum_partial_copy_generic)
        pushl %ebx
        pushl %edi
        pushl %esi
        movl ARGBASE+4(%esp),%esi       #src
        movl ARGBASE+8(%esp),%edi       #dst
        movl ARGBASE+12(%esp),%ecx      #len
        movl $-1, %eax                  #sum
#       movl %ecx, %edx
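        # Same computed-jump trick as in csum_partial above: %ebx becomes
        # -(len & 0x3c), each ROUND/ROUND1 expansion is 8 bytes of code, so
        # "lea 3f(%ebx,%ebx), %ebx" points the right number of rounds back
        # from 3:; %esi and %edi are pre-advanced so the negative offsets
        # line up with the start of the buffer.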
        movl %ecx, %ebx
        movl %esi, %edx
        shrl $6, %ecx
        andl $0x3c, %ebx
        negl %ebx
        subl %ebx, %esi
        subl %ebx, %edi
        lea -1(%esi),%edx
        andl $-32,%edx
        lea 3f(%ebx,%ebx), %ebx
        testl %esi, %esi
        JMP_NOSPEC ebx
1:      addl $64,%esi
        addl $64,%edi
        EXC(movb -32(%edx),%bl) ; EXC(movb (%edx),%bl)
        ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
        ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
        ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
        ROUND (-16) ROUND(-12) ROUND(-8)  ROUND(-4)
3:      adcl $0,%eax
        addl $64, %edx
        dec %ecx
        jge 1b
4:      movl ARGBASE+12(%esp),%edx      #len
        andl $3, %edx
        jz 7f
        cmpl $2, %edx
        jb 5f
EXC(    movw (%esi), %dx        )
        leal 2(%esi), %esi
EXC(    movw %dx, (%edi)        )
        leal 2(%edi), %edi
        je 6f
        shll $16,%edx
5:
EXC(    movb (%esi), %dl        )
EXC(    movb %dl, (%edi)        )
6:      addl %edx, %eax
        adcl $0, %eax
7:
        popl %esi
        popl %edi
        popl %ebx
        RET
SYM_FUNC_END(csum_partial_copy_generic)

#undef ROUND
#undef ROUND1

#endif
EXPORT_SYMBOL(csum_partial_copy_generic)