// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gro.h>
#include <net/gue.h>
#include <net/fou.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	__be16 port;
	u8 family;
	u16 type;
	struct list_head list;
	struct rcu_head rcu;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
	struct list_head fou_list;
	struct mutex fou_lock;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
{
	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	if (fou->family == AF_INET)
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
	else
		ipv6_hdr(skb)->payload_len =
		    htons(ntohs(ipv6_hdr(skb)->payload_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
	return iptunnel_pull_offloads(skb);
}

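/* UDP encap_rcv handler for plain FOU. The return value follows the
 * encap_rcv convention: a negative value asks the stack to resubmit the
 * packet as that (positive) IP protocol, 0 means the skb was consumed,
 * and 1 falls back to normal UDP processing.
 */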
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
		goto drop;

	return -fou->protocol;

drop:
	kfree_skb(skb);
	return 0;
}

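/* Process the GUE remote checksum offload option: the private option data
 * carries a checksum start and offset pair, which skb_remcsum_process()
 * uses to patch up the inner checksum.
 */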
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = sizeof(struct udphdr) + hdrlen +
	    max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return guehdr;

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}

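/* UDP encap_rcv handler for GUE. Parses the GUE header (or, for version 1,
 * a directly encapsulated IPv4/IPv6 header), handles private flags such as
 * remote checksum offload, and returns minus the inner protocol number so
 * the stack resubmits the decapsulated packet.
 */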
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;
	u8 proto_ctype;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	switch (guehdr->version) {
	case 0: /* Full GUE header present */
		break;

	case 1: {
		/* Direct encapsulation of IPv4 or IPv6 */
		int prot;

		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			prot = IPPROTO_IPIP;
			break;
		case 6:
			prot = IPPROTO_IPV6;
			break;
		default:
			goto drop;
		}

		if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
			goto drop;

		return -prot;
	}

	default: /* Undefined version */
		goto drop;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	if (validate_gue_flags(guehdr, optlen))
		goto drop;

	hdrlen = sizeof(struct guehdr) + optlen;

	if (fou->family == AF_INET)
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
	else
		ipv6_hdr(skb)->payload_len =
		    htons(ntohs(ipv6_hdr(skb)->payload_len) - len);

	/* Pull csum through the guehdr now. This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	proto_ctype = guehdr->proto_ctype;
	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	if (iptunnel_pull_offloads(skb))
		goto drop;

	return -proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *fou_gro_receive(struct sock *sk,
				       struct list_head *head,
				       struct sk_buff *skb)
{
	const struct net_offload __rcu **offloads;
	u8 proto = fou_from_sock(sk)->protocol;
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;

	/* We can clear the encap_mark for FOU as we are essentially doing
	 * one of two possible things. We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the UDP tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out;

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

out:
	return pp;
}

static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
			    int nhoff)
{
	const struct net_offload __rcu **offloads;
	u8 proto = fou_from_sock(sk)->protocol;
	const struct net_offload *ops;
	int err = -ENOSYS;

	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out;

	err = ops->callbacks.gro_complete(skb, nhoff);

	skb_set_inner_mac_header(skb, nhoff);

out:
	return err;
}

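/* GRO variant of remote checksum offload processing. Unlike gue_remcsum(),
 * this operates on the GRO header buffer and requires that the outer UDP
 * checksum has already been validated (csum_valid).
 */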
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, struct gro_remcsum *grc,
				      bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
					 start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}

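/* GRO receive handler for GUE. Validates the GUE header, applies remote
 * checksum offload if requested, marks held packets whose GUE headers
 * differ as not being the same flow, and then hands off to the GRO
 * receive callback of the inner protocol.
 */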
static struct sk_buff *gue_gro_receive(struct sock *sk,
				       struct list_head *head,
				       struct sk_buff *skb)
{
	const struct net_offload __rcu **offloads;
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = fou_from_sock(sk);
	struct gro_remcsum grc;
	u8 proto;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header(skb, len, off);
	if (unlikely(!guehdr))
		goto out;

	switch (guehdr->version) {
	case 0:
		break;
	case 1:
		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			proto = IPPROTO_IPIP;
			break;
		case 6:
			proto = IPPROTO_IPV6;
			break;
		default:
			goto out;
		}
		goto next_proto;
	default:
		goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	list_for_each_entry(p, head, list) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	proto = guehdr->proto_ctype;

next_proto:

	/* We can clear the encap_mark for GUE as we are essentially doing
	 * one of two possible things. We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GUE header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
		goto out;

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);

	return pp;
}

static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload __rcu **offloads;
	const struct net_offload *ops;
	unsigned int guehlen = 0;
	u8 proto;
	int err = -ENOENT;

	switch (guehdr->version) {
	case 0:
		proto = guehdr->proto_ctype;
		guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
		break;
	case 1:
		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			proto = IPPROTO_IPIP;
			break;
		case 6:
			proto = IPPROTO_IPV6;
			break;
		default:
			return err;
		}
		break;
	default:
		return err;
	}

	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

	skb_set_inner_mac_header(skb, nhoff + guehlen);

out:
	return err;
}

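/* Return true if an existing fou socket matches the given config: same
 * address family, local/peer addresses and ports, and bound device.
 */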
static bool fou_cfg_cmp(struct fou *fou, struct fou_cfg *cfg)
{
	struct sock *sk = fou->sock->sk;
	struct udp_port_cfg *udp_cfg = &cfg->udp_config;

	if (fou->family != udp_cfg->family ||
	    fou->port != udp_cfg->local_udp_port ||
	    sk->sk_dport != udp_cfg->peer_udp_port ||
	    sk->sk_bound_dev_if != udp_cfg->bind_ifindex)
		return false;

	if (fou->family == AF_INET) {
		if (sk->sk_rcv_saddr != udp_cfg->local_ip.s_addr ||
		    sk->sk_daddr != udp_cfg->peer_ip.s_addr)
			return false;
		else
			return true;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (ipv6_addr_cmp(&sk->sk_v6_rcv_saddr, &udp_cfg->local_ip6) ||
		    ipv6_addr_cmp(&sk->sk_v6_daddr, &udp_cfg->peer_ip6))
			return false;
		else
			return true;
#endif
	}

	return false;
}

static int fou_add_to_port_list(struct net *net, struct fou *fou,
				struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou_cfg_cmp(fout, cfg)) {
			mutex_unlock(&fn->fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fn->fou_list);
	mutex_unlock(&fn->fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;

	list_del(&fou->list);
	udp_tunnel_sock_release(sock);

	kfree_rcu(fou, rcu);
}

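/* Create a UDP tunnel socket for the given config, wire up the receive and
 * GRO callbacks for either direct FOU or GUE encapsulation, and add the
 * new entry to the per-netns port list.
 */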
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct socket *sock = NULL;
	struct fou *fou = NULL;
	struct sock *sk;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int err;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->port = cfg->udp_config.local_udp_port;
	fou->family = cfg->udp_config.family;
	fou->flags = cfg->flags;
	fou->type = cfg->type;
	fou->sock = sock;

	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.sk_user_data = fou;
	tunnel_cfg.encap_destroy = NULL;

	/* Initialize the callbacks for the fou encapsulation type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		tunnel_cfg.encap_rcv = fou_udp_recv;
		tunnel_cfg.gro_receive = fou_gro_receive;
		tunnel_cfg.gro_complete = fou_gro_complete;
		fou->protocol = cfg->protocol;
		break;
	case FOU_ENCAP_GUE:
		tunnel_cfg.encap_rcv = gue_udp_recv;
		tunnel_cfg.gro_receive = gue_gro_receive;
		tunnel_cfg.gro_complete = gue_gro_complete;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	sk->sk_allocation = GFP_ATOMIC;

	err = fou_add_to_port_list(net, fou, cfg);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		udp_tunnel_sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	int err = -EINVAL;
	struct fou *fou;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fou, &fn->fou_list, list) {
		if (fou_cfg_cmp(fou, cfg)) {
			fou_release(fou);
			err = 0;
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);

	return err;
}

static struct genl_family fou_nl_family;

static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT]			= { .type = NLA_U16, },
	[FOU_ATTR_AF]			= { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO]		= { .type = NLA_U8, },
	[FOU_ATTR_TYPE]			= { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG, },
	[FOU_ATTR_LOCAL_V4]		= { .type = NLA_U32, },
	[FOU_ATTR_PEER_V4]		= { .type = NLA_U32, },
	[FOU_ATTR_LOCAL_V6]		= { .len = sizeof(struct in6_addr), },
	[FOU_ATTR_PEER_V6]		= { .len = sizeof(struct in6_addr), },
	[FOU_ATTR_PEER_PORT]		= { .type = NLA_U16, },
	[FOU_ATTR_IFINDEX]		= { .type = NLA_S32, },
};

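/* Translate FOU_ATTR_* netlink attributes into a struct fou_cfg. A peer
 * address must come with a peer port, and an ifindex is only accepted
 * together with a local address.
 */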
static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	bool has_local = false, has_peer = false;
	struct nlattr *attr;
	int ifindex;
	__be16 port;

	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		switch (family) {
		case AF_INET:
			break;
		case AF_INET6:
			cfg->udp_config.ipv6_v6only = 1;
			break;
		default:
			return -EAFNOSUPPORT;
		}

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);
		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	if (cfg->udp_config.family == AF_INET) {
		if (info->attrs[FOU_ATTR_LOCAL_V4]) {
			attr = info->attrs[FOU_ATTR_LOCAL_V4];
			cfg->udp_config.local_ip.s_addr = nla_get_in_addr(attr);
			has_local = true;
		}

		if (info->attrs[FOU_ATTR_PEER_V4]) {
			attr = info->attrs[FOU_ATTR_PEER_V4];
			cfg->udp_config.peer_ip.s_addr = nla_get_in_addr(attr);
			has_peer = true;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (info->attrs[FOU_ATTR_LOCAL_V6]) {
			attr = info->attrs[FOU_ATTR_LOCAL_V6];
			cfg->udp_config.local_ip6 = nla_get_in6_addr(attr);
			has_local = true;
		}

		if (info->attrs[FOU_ATTR_PEER_V6]) {
			attr = info->attrs[FOU_ATTR_PEER_V6];
			cfg->udp_config.peer_ip6 = nla_get_in6_addr(attr);
			has_peer = true;
		}
#endif
	}

	if (has_peer) {
		if (info->attrs[FOU_ATTR_PEER_PORT]) {
			port = nla_get_be16(info->attrs[FOU_ATTR_PEER_PORT]);
			cfg->udp_config.peer_udp_port = port;
		} else {
			return -EINVAL;
		}
	}

	if (info->attrs[FOU_ATTR_IFINDEX]) {
		if (!has_local)
			return -EINVAL;

		ifindex = nla_get_s32(info->attrs[FOU_ATTR_IFINDEX]);

		cfg->udp_config.bind_ifindex = ifindex;
	}

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(net, &cfg);
}

static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
	struct sock *sk = fou->sock->sk;

	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
	    nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
	    nla_put_be16(msg, FOU_ATTR_PEER_PORT, sk->sk_dport) ||
	    nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
	    nla_put_u8(msg, FOU_ATTR_TYPE, fou->type) ||
	    nla_put_s32(msg, FOU_ATTR_IFINDEX, sk->sk_bound_dev_if))
		return -1;

	if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
		if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
			return -1;

	if (fou->sock->sk->sk_family == AF_INET) {
		if (nla_put_in_addr(msg, FOU_ATTR_LOCAL_V4, sk->sk_rcv_saddr))
			return -1;

		if (nla_put_in_addr(msg, FOU_ATTR_PEER_V4, sk->sk_daddr))
			return -1;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (nla_put_in6_addr(msg, FOU_ATTR_LOCAL_V6,
				     &sk->sk_v6_rcv_saddr))
			return -1;

		if (nla_put_in6_addr(msg, FOU_ATTR_PEER_V6, &sk->sk_v6_daddr))
			return -1;
#endif
	}

	return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
			 u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (fou_fill_info(fou, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct sk_buff *msg;
	struct fou_cfg cfg;
	struct fou *fout;
	__be16 port;
	u8 family;
	int ret;

	ret = parse_nl_config(info, &cfg);
	if (ret)
		return ret;
	port = cfg.udp_config.local_udp_port;
	if (port == 0)
		return -EINVAL;

	family = cfg.udp_config.family;
	if (family != AF_INET && family != AF_INET6)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = -ESRCH;
	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou_cfg_cmp(fout, &cfg)) {
			ret = fou_dump_info(fout, info->snd_portid,
					    info->snd_seq, 0, msg,
					    info->genlhdr->cmd);
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);
	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;
	int idx = 0, ret;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (idx++ < cb->args[0])
			continue;
		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
				    skb, FOU_CMD_GET);
		if (ret)
			break;
	}
	mutex_unlock(&fn->fou_lock);

	cb->args[0] = idx;
	return skb->len;
}

static const struct genl_small_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = fou_nl_cmd_add_port,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = fou_nl_cmd_rm_port,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = fou_nl_cmd_get_port,
		.dumpit = fou_nl_dump,
	},
};

static struct genl_family fou_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.policy		= fou_nl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= fou_nl_ops,
	.n_small_ops	= ARRAY_SIZE(fou_nl_ops),
	.resv_start_op	= FOU_CMD_GET + 1,
};

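/* Header length helpers used through ip_tunnel_encap_ops to reserve
 * encapsulation space: plain FOU adds only a UDP header, while GUE adds
 * the GUE header plus any private option data (e.g. for remote checksum
 * offload).
 */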
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		       u8 *protocol, __be16 *sport, int type)
{
	int err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
						skb, 0, 0, false);

	return 0;
}
EXPORT_SYMBOL(__fou_build_header);

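/* Build the GUE header in front of the inner packet. If remote checksum
 * offload is requested and the skb has a partial checksum, a private
 * option carrying the checksum start/offset pair is appended.
 */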
int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		       u8 *protocol, __be16 *sport, int type)
{
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	void *data;
	bool need_priv = false;
	int err;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	/* Get source port (based on flow hash) before skb_push */
	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
						skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__gue_build_header);

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	err = __fou_build_header(skb, e, protocol, &sport, type);
	if (err)
		return err;

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}

static int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	err = __gue_build_header(skb, e, protocol, &sport, type);
	if (err)
		return err;

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}

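/* ICMP error handling for GUE: look past the GUE header and dispatch the
 * error to the err_handler of the encapsulated protocol, restoring the
 * original transport header offset afterwards.
 */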
static int gue_err_proto_handler(int proto, struct sk_buff *skb, u32 info)
{
	const struct net_protocol *ipprot = rcu_dereference(inet_protos[proto]);

	if (ipprot && ipprot->err_handler) {
		if (!ipprot->err_handler(skb, info))
			return 0;
	}

	return -ENOENT;
}

static int gue_err(struct sk_buff *skb, u32 info)
{
	int transport_offset = skb_transport_offset(skb);
	struct guehdr *guehdr;
	size_t len, optlen;
	int ret;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, transport_offset + len))
		return -EINVAL;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	switch (guehdr->version) {
	case 0: /* Full GUE header present */
		break;
	case 1: {
		/* Direct encapsulation of IPv4 or IPv6 */
		skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));

		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			ret = gue_err_proto_handler(IPPROTO_IPIP, skb, info);
			goto out;
#if IS_ENABLED(CONFIG_IPV6)
		case 6:
			ret = gue_err_proto_handler(IPPROTO_IPV6, skb, info);
			goto out;
#endif
		default:
			ret = -EOPNOTSUPP;
			goto out;
		}
	}
	default: /* Undefined version */
		return -EOPNOTSUPP;
	}

	if (guehdr->control)
		return -ENOENT;

	optlen = guehdr->hlen << 2;

	if (!pskb_may_pull(skb, transport_offset + len + optlen))
		return -EINVAL;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
	if (validate_gue_flags(guehdr, optlen))
		return -EINVAL;

	/* Handling exceptions for direct UDP encapsulation in GUE would lead to
	 * recursion. Besides, this kind of encapsulation can't even be
	 * configured currently. Discard this.
	 */
	if (guehdr->proto_ctype == IPPROTO_UDP ||
	    guehdr->proto_ctype == IPPROTO_UDPLITE)
		return -EOPNOTSUPP;

	skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
	ret = gue_err_proto_handler(guehdr->proto_ctype, skb, info);

out:
	skb_set_transport_header(skb, transport_offset);
	return ret;
}

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
	.err_handler = gue_err,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
	.err_handler = gue_err,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static __net_init int fou_init_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);

	INIT_LIST_HEAD(&fn->fou_list);
	mutex_init(&fn->fou_lock);
	return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fou, *next;

	/* Close all the FOU sockets */
	mutex_lock(&fn->fou_lock);
	list_for_each_entry_safe(fou, next, &fn->fou_list, list)
		fou_release(fou);
	mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
	.init = fou_init_net,
	.exit = fou_exit_net,
	.id   = &fou_net_id,
	.size = sizeof(struct fou_net),
};

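/* Module init: register per-netns state first, then the generic netlink
 * family, then the tunnel encap ops; unwind in reverse order on failure.
 */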
static int __init fou_init(void)
{
	int ret;

	ret = register_pernet_device(&fou_net_ops);
	if (ret)
		goto exit;

	ret = genl_register_family(&fou_nl_family);
	if (ret < 0)
		goto unregister;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
exit:
	return ret;
}

static void __exit fou_fini(void)
{
	ip_tunnel_encap_del_fou_ops();
	genl_unregister_family(&fou_nl_family);
	unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Foo over UDP");