offload.c 2.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * sctp_offload - GRO/GSO Offloading for SCTP
  4. *
  5. * Copyright (C) 2015, Marcelo Ricardo Leitner <[email protected]>
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #include <linux/kernel.h>
  9. #include <linux/kprobes.h>
  10. #include <linux/socket.h>
  11. #include <linux/sctp.h>
  12. #include <linux/proc_fs.h>
  13. #include <linux/vmalloc.h>
  14. #include <linux/module.h>
  15. #include <linux/kfifo.h>
  16. #include <linux/time.h>
  17. #include <net/net_namespace.h>
  18. #include <linux/skbuff.h>
  19. #include <net/sctp/sctp.h>
  20. #include <net/sctp/checksum.h>
  21. #include <net/protocol.h>
/* Compute the SCTP CRC32c for one GSO segment.
 *
 * Clears the checksum-offload state on the skb first: once this returns,
 * the segment carries a complete SCTP checksum and no further csum work
 * is expected from the device for the SCTP layer itself.
 *
 * Returns the CRC32c computed from the transport header to the end of
 * the packet, ready to be stored in sctphdr->checksum.
 */
static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
{
	/* The CRC is filled in by the caller; nothing left to offload. */
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	/* csum and csum_start in GSO CB may be needed to do the UDP
	 * checksum when it's a UDP tunneling packet.
	 */
	SKB_GSO_CB(skb)->csum = (__force __wsum)~0;
	SKB_GSO_CB(skb)->csum_start = skb_headroom(skb) + skb->len;

	return sctp_compute_cksum(skb, skb_transport_offset(skb));
}
/* GSO segmentation callback for IPPROTO_SCTP.
 *
 * Splits a GSO skb into individual SCTP packets via skb_segment().
 * When the device can take the super-packet as-is (skb_gso_ok() with
 * NETIF_F_GSO_ROBUST), only gso_segs is recomputed and NULL is returned
 * so the skb is passed through unsegmented.
 *
 * Returns the segment list, NULL for the pass-through case, or an
 * ERR_PTR() on failure.
 */
static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sctphdr *sh;

	/* Refuse anything that isn't marked as SCTP GSO. */
	if (!skb_is_gso_sctp(skb))
		goto out;

	sh = sctp_hdr(skb);
	if (!pskb_may_pull(skb, sizeof(*sh)))
		goto out;

	/* skb_segment() expects data to start past the SCTP header. */
	__skb_pull(skb, sizeof(*sh));

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		struct skb_shared_info *pinfo = skb_shinfo(skb);
		struct sk_buff *frag_iter;

		pinfo->gso_segs = 0;
		if (skb->len != skb->data_len) {
			/* Means we have chunks in here too */
			pinfo->gso_segs++;
		}

		/* Each frag-list entry is one more packet. */
		skb_walk_frags(skb, frag_iter)
			pinfo->gso_segs++;

		/* NULL tells the caller to use the skb unsegmented. */
		segs = NULL;
		goto out;
	}

	/* CRC32c is not an inet checksum, so mask out NETIF_F_SG and let
	 * HW_CSUM mark the segments CHECKSUM_PARTIAL for the fixup below.
	 */
	segs = skb_segment(skb, (features | NETIF_F_HW_CSUM) & ~NETIF_F_SG);
	if (IS_ERR(segs))
		goto out;

	/* All that is left is update SCTP CRC if necessary */
	if (!(features & NETIF_F_SCTP_CRC)) {
		for (skb = segs; skb; skb = skb->next) {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				sh = sctp_hdr(skb);
				sh->checksum = sctp_gso_make_checksum(skb);
			}
		}
	}

out:
	return segs;
}
/* IPv4 offload hooks: only GSO segmentation is implemented for SCTP. */
static const struct net_offload sctp_offload = {
	.callbacks = {
		.gso_segment = sctp_gso_segment,
	},
};
/* IPv6 offload hooks: shares the same GSO segmentation callback. */
static const struct net_offload sctp6_offload = {
	.callbacks = {
		.gso_segment = sctp_gso_segment,
	},
};
  83. int __init sctp_offload_init(void)
  84. {
  85. int ret;
  86. ret = inet_add_offload(&sctp_offload, IPPROTO_SCTP);
  87. if (ret)
  88. goto out;
  89. ret = inet6_add_offload(&sctp6_offload, IPPROTO_SCTP);
  90. if (ret)
  91. goto ipv4;
  92. crc32c_csum_stub = &sctp_csum_ops;
  93. return ret;
  94. ipv4:
  95. inet_del_offload(&sctp_offload, IPPROTO_SCTP);
  96. out:
  97. return ret;
  98. }