  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. /*
  3. * INET An implementation of the TCP/IP protocol suite for the LINUX
  4. * operating system. INET is implemented using the BSD Socket
  5. * interface as the means of communication with the user level.
  6. *
  7. * Checksumming functions for IP, TCP, UDP and so on
  8. *
  9. * Authors: Jorge Cwik, <[email protected]>
  10. * Arnt Gulbrandsen, <[email protected]>
  11. * Borrows very liberally from tcp.c and ip.c, see those
  12. * files for more names.
  13. */
  14. #ifndef _CHECKSUM_H
  15. #define _CHECKSUM_H
  16. #include <linux/errno.h>
  17. #include <asm/types.h>
  18. #include <asm/byteorder.h>
  19. #include <linux/uaccess.h>
  20. #include <asm/checksum.h>
  21. #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
  22. static __always_inline
  23. __wsum csum_and_copy_from_user (const void __user *src, void *dst,
  24. int len)
  25. {
  26. if (copy_from_user(dst, src, len))
  27. return 0;
  28. return csum_partial(dst, len, ~0U);
  29. }
  30. #endif
  31. #ifndef HAVE_CSUM_COPY_USER
  32. static __always_inline __wsum csum_and_copy_to_user
  33. (const void *src, void __user *dst, int len)
  34. {
  35. __wsum sum = csum_partial(src, len, ~0U);
  36. if (copy_to_user(dst, src, len) == 0)
  37. return sum;
  38. return 0;
  39. }
  40. #endif
  41. #ifndef _HAVE_ARCH_CSUM_AND_COPY
  42. static __always_inline __wsum
  43. csum_partial_copy_nocheck(const void *src, void *dst, int len)
  44. {
  45. memcpy(dst, src, len);
  46. return csum_partial(dst, len, 0);
  47. }
  48. #endif
  49. #ifndef HAVE_ARCH_CSUM_ADD
  50. static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
  51. {
  52. u32 res = (__force u32)csum;
  53. res += (__force u32)addend;
  54. return (__force __wsum)(res + (res < (__force u32)addend));
  55. }
  56. #endif
  57. static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
  58. {
  59. return csum_add(csum, ~addend);
  60. }
  61. static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
  62. {
  63. u16 res = (__force u16)csum;
  64. res += (__force u16)addend;
  65. return (__force __sum16)(res + (res < (__force u16)addend));
  66. }
  67. static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
  68. {
  69. return csum16_add(csum, ~addend);
  70. }
  71. #ifndef HAVE_ARCH_CSUM_SHIFT
  72. static __always_inline __wsum csum_shift(__wsum sum, int offset)
  73. {
  74. /* rotate sum to align it with a 16b boundary */
  75. if (offset & 1)
  76. return (__force __wsum)ror32((__force u32)sum, 8);
  77. return sum;
  78. }
  79. #endif
  80. static __always_inline __wsum
  81. csum_block_add(__wsum csum, __wsum csum2, int offset)
  82. {
  83. return csum_add(csum, csum_shift(csum2, offset));
  84. }
  85. static __always_inline __wsum
  86. csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
  87. {
  88. return csum_block_add(csum, csum2, offset);
  89. }
  90. static __always_inline __wsum
  91. csum_block_sub(__wsum csum, __wsum csum2, int offset)
  92. {
  93. return csum_block_add(csum, ~csum2, offset);
  94. }
/* Widen a folded 16-bit checksum back into a 32-bit partial checksum. */
static __always_inline __wsum csum_unfold(__sum16 n)
{
	return (__force __wsum)n;
}
/* Generic csum_partial_ext() simply forwards to csum_partial(). */
static __always_inline
__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
	return csum_partial(buff, len, sum);
}
/* 0xffff is the ones'-complement "negative zero" representation of 0. */
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)

/*
 * Apply a 32-bit checksum difference @diff to the folded checksum at
 * @sum: add the delta to the unfolded complement, then refold.
 */
static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
  109. static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
  110. {
  111. __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
  112. *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
  113. }
/* Implements RFC 1624 (Incremental Internet Checksum)
 * 3. Discussion states :
 * HC' = ~(~HC + ~m + m')
 * HC : old checksum in header
 * HC' : new checksum in header
 * m : old value of a 16bit field
 * m' : new value of a 16bit field
 */
static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
  124. static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
  125. {
  126. *csum = csum_add(csum_sub(*csum, old), new);
  127. }
struct sk_buff;

/*
 * Out-of-line incremental checksum updates for fields inside an sk_buff.
 * NOTE(review): @pseudohdr presumably indicates that the changed field is
 * also part of the pseudo-header checksum — confirm at the definition site.
 */
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr);
  136. static __always_inline
  137. void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
  138. __be16 from, __be16 to, bool pseudohdr)
  139. {
  140. inet_proto_csum_replace4(sum, skb, (__force __be32)from,
  141. (__force __be32)to, pseudohdr);
  142. }
/*
 * Remote checksum offload adjustment.
 *
 * @ptr: start of the data covered by @csum
 * @csum: partial checksum over the data at @ptr
 * @start: byte offset the checksum should begin at instead of 0
 * @offset: byte offset of the 16-bit checksum field to patch
 *
 * Removes the first @start bytes' contribution from @csum, writes the
 * folded result into the checksum field at @ptr + @offset, and returns
 * the 32-bit difference between the new and old field values
 * (presumably so the caller can fix up a running skb checksum — confirm).
 */
static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
					     int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out checksum up to start */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Set derived checksum in packet */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}
  156. static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
  157. {
  158. *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
  159. }
  160. static __always_inline __wsum wsum_negate(__wsum val)
  161. {
  162. return (__force __wsum)-((__force u32)val);
  163. }
  164. #endif