/* include/net/ip_tunnels.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __NET_IP_TUNNELS_H
  3. #define __NET_IP_TUNNELS_H 1
  4. #include <linux/if_tunnel.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/skbuff.h>
  7. #include <linux/socket.h>
  8. #include <linux/types.h>
  9. #include <linux/u64_stats_sync.h>
  10. #include <linux/bitops.h>
  11. #include <net/dsfield.h>
  12. #include <net/gro_cells.h>
  13. #include <net/inet_ecn.h>
  14. #include <net/netns/generic.h>
  15. #include <net/rtnetlink.h>
  16. #include <net/lwtunnel.h>
  17. #include <net/dst_cache.h>
  18. #if IS_ENABLED(CONFIG_IPV6)
  19. #include <net/ipv6.h>
  20. #include <net/ip6_fib.h>
  21. #include <net/ip6_route.h>
  22. #endif
  23. /* Keep error state on tunnel for 30 sec */
  24. #define IPTUNNEL_ERR_TIMEO (30*HZ)
  25. /* Used to memset ip_tunnel padding. */
  26. #define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)
  27. /* Used to memset ipv4 address padding. */
  28. #define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
  29. #define IP_TUNNEL_KEY_IPV4_PAD_LEN \
  30. (sizeof_field(struct ip_tunnel_key, u) - \
  31. sizeof_field(struct ip_tunnel_key, u.ipv4))
/* Per-packet tunnel metadata key: outer addresses, tunnel id, flags, QoS
 * bits and outer transport ports, independent of address family.
 *
 * Layout matters: IP_TUNNEL_KEY_SIZE / IP_TUNNEL_KEY_IPV4_PAD above are
 * derived from the field order here (tp_dst must stay the last member
 * covered by IP_TUNNEL_KEY_SIZE).
 */
struct ip_tunnel_key {
	__be64			tun_id;		/* tunnel id; 32-bit keys map via key32_to_tunnel_id() */
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	__be16			tun_flags;	/* TUNNEL_* flag bits */
	u8			tos;		/* TOS for IPv4, TC for IPv6 */
	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
	__be32			label;		/* Flow Label for IPv6 */
	__be16			tp_src;		/* outer transport source port */
	__be16			tp_dst;		/* outer transport destination port */
	__u8			flow_flags;	/* FLOWI_FLAG_*, fed to flowi4_flags */
};
  52. /* Flags for ip_tunnel_info mode. */
  53. #define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
  54. #define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
  55. #define IP_TUNNEL_INFO_BRIDGE 0x04 /* represents a bridged tunnel id */
  56. /* Maximum tunnel options length. */
  57. #define IP_TUNNEL_OPTS_MAX \
  58. GENMASK((sizeof_field(struct ip_tunnel_info, \
  59. options_len) * BITS_PER_BYTE) - 1, 0)
/* Tunnel metadata carried with a packet (e.g. via lwtunnel state; see
 * lwt_tun_info()).  Variable-length option data (options_len bytes) is
 * stored immediately after this struct — see ip_tunnel_info_opts().
 */
struct ip_tunnel_info {
	struct ip_tunnel_key	key;
#ifdef CONFIG_DST_CACHE
	struct dst_cache	dst_cache;	/* cached output route */
#endif
	u8			options_len;	/* bytes of trailing option data */
	u8			mode;		/* IP_TUNNEL_INFO_* flags */
};
  68. /* 6rd prefix/relay information */
  69. #ifdef CONFIG_IPV6_SIT_6RD
/* 6rd configuration for a SIT tunnel (see ip_tunnel.ip6rd below). */
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;		/* delegated IPv6 prefix */
	__be32			relay_prefix;	/* IPv4 prefix of the 6rd relay */
	u16			prefixlen;	/* bits of @prefix that are significant */
	u16			relay_prefixlen; /* bits of @relay_prefix that are significant */
};
  76. #endif
/* Secondary encapsulation (FOU/GUE) layered on top of the tunnel.
 * @type indexes iptun_encaps[] (TUNNEL_ENCAP_NONE means disabled).
 */
struct ip_tunnel_encap {
	u16			type;
	u16			flags;	/* TUNNEL_ENCAP_FLAG_* — presumably; confirm against users */
	__be16			sport;	/* outer encap source port */
	__be16			dport;	/* outer encap destination port */
};
/* One entry of a tunnel's potential router list (used by SIT — see the
 * ip_tunnel.prl field below).  RCU-protected singly-linked list.
 */
struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;		/* router IPv4 address */
	u16				flags;
	struct rcu_head			rcu_head;	/* for deferred free */
};
  89. struct metadata_dst;
/* Per-device state of an IPv4-based tunnel (ipip, GRE, SIT, ...). */
struct ip_tunnel {
	struct ip_tunnel __rcu	*next;		/* hash-chain linkage in ip_tunnel_net.tunnels */
	struct hlist_node hash_node;
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;
	struct net		*net;	/* netns for packet i/o */

	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */
	int		err_count;	/* Number of arrived ICMP errors */

	/* These three fields used only by GRE */
	u32		i_seqno;	/* The last seen seqno */
	atomic_t	o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */

	/* These four fields used only by ERSPAN */
	u32		index;		/* ERSPAN type II index */
	u8		erspan_ver;	/* ERSPAN version */
	u8		dir;		/* ERSPAN direction */
	u16		hwid;		/* ERSPAN hardware ID */

	struct dst_cache dst_cache;	/* cached output route */

	struct ip_tunnel_parm parms;	/* configured tunnel parameters */

	int		mlink;
	int		encap_hlen;	/* Encap header length (FOU,GUE) */
	int		hlen;		/* tun_hlen + encap_hlen */
	struct ip_tunnel_encap encap;	/* secondary encap configuration */

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	unsigned int		ip_tnl_net_id;	/* pernet id of the owning ip_tunnel_net */
	struct gro_cells	gro_cells;
	__u32		fwmark;		/* firewall mark for tunnel routing lookups */
	bool		collect_md;	/* metadata (external control) mode */
	bool		ignore_df;
};
/* Parsed tunnel-packet info, filled in by receive-side header parsing and
 * consumed by ip_tunnel_rcv()/ip_tunnel_lookup().
 */
struct tnl_ptk_info {
	__be16 flags;	/* TUNNEL_* flags found in the header */
	__be16 proto;	/* inner protocol */
	__be32 key;	/* tunnel key, if TUNNEL_KEY was present */
	__be32 seq;	/* sequence number, if TUNNEL_SEQ was present */
	int hdr_len;	/* total tunnel header length to pull */
};
  133. #define PACKET_RCVD 0
  134. #define PACKET_REJECT 1
  135. #define PACKET_NEXT 2
  136. #define IP_TNL_HASH_BITS 7
  137. #define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
/* Per-netns state for one tunnel type (set up by ip_tunnel_init_net()). */
struct ip_tunnel_net {
	struct net_device *fb_tunnel_dev;	/* fallback tunnel device */
	struct rtnl_link_ops *rtnl_link_ops;
	struct hlist_head tunnels[IP_TNL_HASH_SIZE];	/* tunnel lookup hash */
	struct ip_tunnel __rcu *collect_md_tun;	/* the metadata-mode tunnel, if any */
	int type;
};
/* Initialize @key for an IPv4 tunnel, zeroing every byte not covered by
 * an explicit assignment so the key contains no uninitialized data.
 */
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id, __be16 tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	/* Zero the tail of the address union not used by IPv4. */
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	key->tun_flags = tun_flags;

	/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g: GRE over IPSEC, the tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding.  IP_TUNNEL_KEY_SIZE ends at tp_dst, so this
	 * also zeroes flow_flags and any trailing padding bytes.
	 */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
  171. static inline bool
  172. ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
  173. const struct ip_tunnel_info *info)
  174. {
  175. if (skb->mark)
  176. return false;
  177. if (!info)
  178. return true;
  179. if (info->key.tun_flags & TUNNEL_NOCACHE)
  180. return false;
  181. return true;
  182. }
  183. static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
  184. *tun_info)
  185. {
  186. return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
  187. }
/* Widen a 32-bit tunnel key (e.g. a GRE key) to a __be64 tunnel id that is
 * numerically equal, i.e. the key occupies the id's least-significant 32
 * bits on either endianness.  Inverse of tunnel_id_to_key32().
 */
static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)key;
#else
	/* On little-endian, shift the raw bytes into the high word so the
	 * big-endian 64-bit value keeps the key in its low 32 bits.
	 */
	return (__force __be64)((__force u64)key << 32);
#endif
}
/* Returns the least-significant 32 bits of a __be64 tunnel id — i.e. the
 * 32-bit key previously widened by key32_to_tunnel_id().
 */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)tun_id;
#else
	return (__force __be32)((__force u64)tun_id >> 32);
#endif
}
  205. #ifdef CONFIG_INET
/* Fill @fl4 for an IPv4 tunnel route lookup.  When @oif is set and belongs
 * to an l3mdev (VRF), the lookup is steered via the l3mdev master instead
 * of the raw ifindex.
 */
static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
				       int proto,
				       __be32 daddr, __be32 saddr,
				       __be32 key, __u8 tos,
				       struct net *net, int oif,
				       __u32 mark, __u32 tun_inner_hash,
				       __u8 flow_flags)
{
	memset(fl4, 0, sizeof(*fl4));

	if (oif) {
		fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
		/* Legacy VRF/l3mdev use case */
		fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
	}

	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;		/* only meaningful for GRE lookups */
	fl4->flowi4_mark = mark;
	fl4->flowi4_multipath_hash = tun_inner_hash;	/* inner-packet hash for multipath */
	fl4->flowi4_flags = flow_flags;
}
  229. int ip_tunnel_init(struct net_device *dev);
  230. void ip_tunnel_uninit(struct net_device *dev);
  231. void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
  232. struct net *ip_tunnel_get_link_net(const struct net_device *dev);
  233. int ip_tunnel_get_iflink(const struct net_device *dev);
  234. int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
  235. struct rtnl_link_ops *ops, char *devname);
  236. void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
  237. struct rtnl_link_ops *ops);
  238. void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
  239. const struct iphdr *tnl_params, const u8 protocol);
  240. void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
  241. const u8 proto, int tunnel_hlen);
  242. int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
  243. int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
  244. void __user *data, int cmd);
  245. int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
  246. int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
  247. struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
  248. int link, __be16 flags,
  249. __be32 remote, __be32 local,
  250. __be32 key);
  251. int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
  252. const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
  253. bool log_ecn_error);
  254. int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
  255. struct ip_tunnel_parm *p, __u32 fwmark);
  256. int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
  257. struct ip_tunnel_parm *p, __u32 fwmark);
  258. void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
  259. bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
  260. struct ip_tunnel_encap *encap);
  261. void ip_tunnel_netlink_parms(struct nlattr *data[],
  262. struct ip_tunnel_parm *parms);
  263. extern const struct header_ops ip_tunnel_header_ops;
  264. __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
/* Operations for one secondary-encap type (FOU/GUE ...), registered into
 * iptun_encaps[] via ip_tunnel_encap_add_ops().
 */
struct ip_tunnel_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);	/* extra header bytes this encap adds */
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4);	/* push the encap header onto @skb */
	int (*err_handler)(struct sk_buff *skb, u32 info);	/* error (e.g. ICMP) handler */
};
  271. #define MAX_IPTUN_ENCAP_OPS 8
  272. extern const struct ip_tunnel_encap_ops __rcu *
  273. iptun_encaps[MAX_IPTUN_ENCAP_OPS];
  274. int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
  275. unsigned int num);
  276. int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
  277. unsigned int num);
  278. int ip_tunnel_encap_setup(struct ip_tunnel *t,
  279. struct ip_tunnel_encap *ipencap);
  280. static inline bool pskb_inet_may_pull(struct sk_buff *skb)
  281. {
  282. int nhlen;
  283. switch (skb->protocol) {
  284. #if IS_ENABLED(CONFIG_IPV6)
  285. case htons(ETH_P_IPV6):
  286. nhlen = sizeof(struct ipv6hdr);
  287. break;
  288. #endif
  289. case htons(ETH_P_IP):
  290. nhlen = sizeof(struct iphdr);
  291. break;
  292. default:
  293. nhlen = 0;
  294. }
  295. return pskb_network_may_pull(skb, nhlen);
  296. }
/* Header bytes added by the configured secondary encap: 0 when none is
 * configured, the ops' encap_hlen() result when registered, or -EINVAL
 * for an out-of-range or unregistered type.
 */
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip_tunnel_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	/* iptun_encaps[] entries can be removed concurrently (see
	 * ip_tunnel_encap_del_ops), hence the RCU read section.
	 */
	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}
/* Apply the tunnel's secondary encap to @skb via the registered
 * build_header() op.  Returns 0 when no encap is configured (or the op's
 * result), -EINVAL for an out-of-range or unregistered type.
 * @protocol may be updated by the op; @fl4 supplies the outer flow.
 */
static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
				  u8 *protocol, struct flowi4 *fl4)
{
	const struct ip_tunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (t->encap.type == TUNNEL_ENCAP_NONE)
		return 0;

	if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	/* Ops may be unregistered concurrently; pin them with RCU. */
	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[t->encap.type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, &t->encap, protocol, fl4);
	rcu_read_unlock();

	return ret;
}
  328. /* Extract dsfield from inner protocol */
  329. static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
  330. const struct sk_buff *skb)
  331. {
  332. __be16 payload_protocol = skb_protocol(skb, true);
  333. if (payload_protocol == htons(ETH_P_IP))
  334. return iph->tos;
  335. else if (payload_protocol == htons(ETH_P_IPV6))
  336. return ipv6_get_dsfield((const struct ipv6hdr *)iph);
  337. else
  338. return 0;
  339. }
  340. static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
  341. const struct sk_buff *skb)
  342. {
  343. __be16 payload_protocol = skb_protocol(skb, true);
  344. if (payload_protocol == htons(ETH_P_IP))
  345. return iph->ttl;
  346. else if (payload_protocol == htons(ETH_P_IPV6))
  347. return ((const struct ipv6hdr *)iph)->hop_limit;
  348. else
  349. return 0;
  350. }
/* Propagate ECN bits out: combine the caller-chosen outer @tos with the
 * inner packet's ECN bits via INET_ECN_encapsulate().
 */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
  358. int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
  359. __be16 inner_proto, bool raw_proto, bool xnet);
/* Convenience wrapper around __iptunnel_pull_header() with
 * raw_proto == false.
 */
static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
				       __be16 inner_proto, bool xnet)
{
	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}
  365. void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
  366. __be32 src, __be32 dst, u8 proto,
  367. u8 tos, u8 ttl, __be16 df, bool xnet);
  368. struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
  369. gfp_t flags);
  370. int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
  371. int headroom, bool reply);
  372. int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
/* Strip encapsulation GSO state from @skb after the outer tunnel header
 * has been removed.  May unclone the skb; returns 0 on success or the
 * negative errno from skb_unclone().
 */
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		/* gso_type lives in shared info — unclone before writing. */
		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		/* Clear the SKB_GSO_* bits that correspond to the
		 * NETIF_F_GSO_ENCAP_ALL feature set.
		 */
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}
/* Account one tunnel transmission on @dev:
 *   pkt_len > 0  — success: add to the per-CPU tx byte/packet counters;
 *   pkt_len < 0  — error:   bump tx_errors and tx_aborted_errors;
 *   pkt_len == 0 — drop:    bump tx_dropped.
 */
static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		u64_stats_add(&tstats->tx_bytes, pkt_len);
		u64_stats_inc(&tstats->tx_packets);
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
		return;
	}

	if (pkt_len < 0) {
		DEV_STATS_INC(dev, tx_errors);
		DEV_STATS_INC(dev, tx_aborted_errors);
	} else {
		DEV_STATS_INC(dev, tx_dropped);
	}
}
/* Option data is stored immediately after the ip_tunnel_info struct. */
static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
	return info + 1;
}
/* Copy the trailing option data out of @info into @to, which must hold at
 * least info->options_len bytes.
 */
static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}
/* Store @len bytes of option data after @info and OR the matching TUNNEL_*
 * option flags into the key.  Note that @flags are applied only when
 * @len > 0; a zero length records no options and sets no flags.
 * The caller must have allocated @len bytes of space after @info.
 */
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   __be16 flags)
{
	info->options_len = len;
	if (len > 0) {
		memcpy(ip_tunnel_info_opts(info), from, len);
		info->key.tun_flags |= flags;
	}
}
/* Tunnel metadata embedded in a lightweight-tunnel state's data area. */
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return (struct ip_tunnel_info *)lwtstate->data;
}
  427. DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
/* Returns > 0 if metadata should be collected.  Gated by a static key so
 * the common no-metadata path is a patched-out branch; presumably toggled
 * by ip_tunnel_need_metadata()/ip_tunnel_unneed_metadata() declared below.
 */
static inline int ip_tunnel_collect_metadata(void)
{
	return static_branch_unlikely(&ip_tunnel_metadata_cnt);
}
  433. void __init ip_tunnel_core_init(void);
  434. void ip_tunnel_need_metadata(void);
  435. void ip_tunnel_unneed_metadata(void);
  436. #else /* CONFIG_INET */
/* !CONFIG_INET stubs: keep callers compiling when INET support is out;
 * metadata collection and tunnel options become no-ops.
 */
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return NULL;	/* no tunnel metadata without INET */
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   __be16 flags)
{
	info->options_len = 0;	/* record that no options were stored */
}
  457. #endif /* CONFIG_INET */
  458. #endif /* __NET_IP_TUNNELS_H */