// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>
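
/* Walk the list of segments produced from a GSO skb and move the software
 * timestamp request (SKBTX_SW_TSTAMP / tskey) onto the segment whose
 * sequence range covers ts_seq.
 */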
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}
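
/* IPv4 entry point for TCP GSO: check that the skb really is TCPv4, rebuild
 * the pseudo-header checksum if the stack has not left the skb in
 * CHECKSUM_PARTIAL state, then hand off to the generic tcp_gso_segment().
 */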
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}
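
/* Protocol-agnostic TCP GSO: split a large skb into MSS-sized segments, then
 * fix up the sequence number, flags and checksum of every segment and move
 * the socket/destructor accounting onto the last segment so TCP Small Queues
 * keeps working.
 */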
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO:
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}
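
/* GRO receive: look for a held packet of the same flow and try to coalesce
 * this segment into it; any relevant TCP header difference sets a flush bit
 * instead.
 */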
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		goto out;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on whether
	 * we continue this flow as an atomic flow with a fixed ID or use an
	 * incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}
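
/* Finish a coalesced GRO packet: point csum_start/csum_offset at the TCP
 * checksum, set gso_segs from the GRO count, and flag TCP ECN when CWR was
 * observed, so the merged skb is a valid GSO packet again.
 */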
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
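
/* IPv4 GRO receive hook: validate the TCP checksum against the IPv4
 * pseudo-header before attempting to coalesce; a bad checksum forces a flush.
 */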
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
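
/* IPv4 GRO complete hook: recompute the pseudo-header checksum of the merged
 * packet and mark it as TCPv4 GSO (with a fixed IP ID when the flow was
 * atomic).
 */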
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}
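
/* GSO/GRO callbacks registered for IPPROTO_TCP from tcpv4_offload_init(). */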
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	= tcp4_gso_segment,
		.gro_receive	= tcp4_gro_receive,
		.gro_complete	= tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}