  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * arch/arm/include/asm/checksum.h
  4. *
  5. * IP checksum routines
  6. *
  7. * Copyright (C) Original authors of ../asm-i386/checksum.h
  8. * Copyright (C) 1996-1999 Russell King
  9. */
  10. #ifndef __ASM_ARM_CHECKSUM_H
  11. #define __ASM_ARM_CHECKSUM_H
  12. #include <linux/in6.h>
  13. /*
  14. * computes the checksum of a memory block at buff, length len,
  15. * and adds in "sum" (32-bit)
  16. *
  17. * returns a 32-bit number suitable for feeding into itself
  18. * or csum_tcpudp_magic
  19. *
  20. * this function must be called with even lengths, except
  21. * for the last fragment, which may be odd
  22. *
  23. * it's best to have buff aligned on a 32-bit boundary
  24. */
  25. __wsum csum_partial(const void *buff, int len, __wsum sum);
  26. /*
  27. * the same as csum_partial, but copies from src while it
  28. * checksums, and handles user-space pointer exceptions correctly, when needed.
  29. *
  30. * here even more important to align src and dst on a 32-bit (or even
  31. * better 64-bit) boundary
  32. */
  33. __wsum
  34. csum_partial_copy_nocheck(const void *src, void *dst, int len);
  35. __wsum
  36. csum_partial_copy_from_user(const void __user *src, void *dst, int len);
  37. #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
  38. #define _HAVE_ARCH_CSUM_AND_COPY
  39. static inline
  40. __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
  41. {
  42. if (!access_ok(src, len))
  43. return 0;
  44. return csum_partial_copy_from_user(src, dst, len);
  45. }
  46. /*
  47. * Fold a partial checksum without adding pseudo headers
  48. */
  49. static inline __sum16 csum_fold(__wsum sum)
  50. {
  51. __asm__(
  52. "add %0, %1, %1, ror #16 @ csum_fold"
  53. : "=r" (sum)
  54. : "r" (sum)
  55. : "cc");
  56. return (__force __sum16)(~(__force u32)sum >> 16);
  57. }
  58. /*
  59. * This is a version of ip_compute_csum() optimized for IP headers,
  60. * which always checksum on 4 octet boundaries.
  61. */
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * Sums @ihl 32-bit words starting at @iph using an ADCS end-around
 * carry chain, then folds the result to 16 bits.  The first words are
 * summed unrolled (hence "sub %2, %2, #5": the loop only covers words
 * beyond the 5-word IPv4 minimum header).  The loop counter is tested
 * with "tst #15" / "subne" / "bne" rather than a plain compare so the
 * carry flag from the ADCS chain is never destroyed -- NOTE(review):
 * this counts correctly only for ihl in [5, 20]; the 4-bit IHL field
 * callers pass (max 15) satisfies that.
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int tmp1;
	__wsum sum;

	__asm__ __volatile__(
	"ldr	%0, [%1], #4		@ ip_fast_csum		\n\
	ldr	%3, [%1], #4					\n\
	sub	%2, %2, #5					\n\
	adds	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
1:	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	tst	%2, #15			@ do this carefully	\n\
	subne	%2, %2, #1		@ without destroying	\n\
	bne	1b			@ the carry flag	\n\
	adcs	%0, %0, %3					\n\
	adc	%0, %0, #0"
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
	: "1" (iph), "2" (ihl)
	: "cc", "memory");
	return csum_fold(sum);
}
/*
 * Accumulate the TCP/UDP pseudo-header components (saddr, daddr and
 * the combined len+proto word) into @sum with end-around carry,
 * without folding down to 16 bits.
 *
 * len and proto are pre-combined into a single 32-bit operand.  On
 * big-endian (__ARMEB__) it is added as-is; on little-endian it is
 * added rotated right by 8 bits -- presumably to place the len/proto
 * bytes in the positions the network-byte-order accumulation expects;
 * verify against the big-endian variant if changing this.
 *
 * When @sum is a compile-time constant zero, the first variant is
 * used, which saves one ADCS by starting the chain from saddr+daddr
 * instead of sum+daddr.
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
		   __u8 proto, __wsum sum)
{
	u32 lenprot = len + proto;

	if (__builtin_constant_p(sum) && sum == 0) {
		__asm__(
		"adds	%0, %1, %2	@ csum_tcpudp_nofold0	\n\t"
#ifdef __ARMEB__
		"adcs	%0, %0, %3				\n\t"
#else
		"adcs	%0, %0, %3, ror #8			\n\t"
#endif
		"adc	%0, %0, #0"
		: "=&r" (sum)
		: "r" (daddr), "r" (saddr), "r" (lenprot)
		: "cc");
	} else {
		__asm__(
		"adds	%0, %1, %2	@ csum_tcpudp_nofold	\n\t"
		"adcs	%0, %0, %3				\n\t"
#ifdef __ARMEB__
		"adcs	%0, %0, %4				\n\t"
#else
		"adcs	%0, %0, %4, ror #8			\n\t"
#endif
		"adc	%0, %0, #0"
		: "=&r"(sum)
		: "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
		: "cc");
	}
	return sum;
}
  120. /*
  121. * computes the checksum of the TCP/UDP pseudo-header
  122. * returns a 16-bit checksum, already complemented
  123. */
  124. static inline __sum16
  125. csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
  126. __u8 proto, __wsum sum)
  127. {
  128. return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
  129. }
  130. /*
  131. * this routine is used for miscellaneous IP-like checksums, mainly
  132. * in icmp.c
  133. */
  134. static inline __sum16
  135. ip_compute_csum(const void *buff, int len)
  136. {
  137. return csum_fold(csum_partial(buff, len, 0));
  138. }
  139. #define _HAVE_ARCH_IPV6_CSUM
  140. extern __wsum
  141. __csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
  142. __be32 proto, __wsum sum);
  143. static inline __sum16
  144. csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
  145. __u32 len, __u8 proto, __wsum sum)
  146. {
  147. return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
  148. htonl(proto), sum));
  149. }
  150. #endif