/* socket.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
  4. */
  5. #include "device.h"
  6. #include "peer.h"
  7. #include "socket.h"
  8. #include "queueing.h"
  9. #include "messages.h"
  10. #include <linux/ctype.h>
  11. #include <linux/net.h>
  12. #include <linux/if_vlan.h>
  13. #include <linux/if_ether.h>
  14. #include <linux/inetdevice.h>
  15. #include <net/udp_tunnel.h>
  16. #include <net/ipv6.h>
/* Route and transmit @skb to @endpoint over the device's IPv4 UDP socket.
 * @ds is passed through as the outer header's TOS byte, and a non-NULL
 * @cache is used to remember/reuse the routing decision.  Consumes @skb on
 * both success and failure.  Returns 0 on success or a negative errno.
 */
static int send4(struct wg_device *wg, struct sk_buff *skb,
		 struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
{
	struct flowi4 fl = {
		.saddr = endpoint->src4.s_addr,
		.daddr = endpoint->addr4.sin_addr.s_addr,
		.fl4_dport = endpoint->addr4.sin_port,
		.flowi4_mark = wg->fwmark,
		.flowi4_proto = IPPROTO_UDP
	};
	struct rtable *rt = NULL;
	struct sock *sock;
	int ret = 0;

	skb_mark_not_on_list(skb);
	skb->dev = wg->dev;
	skb->mark = wg->fwmark;

	rcu_read_lock_bh();
	sock = rcu_dereference_bh(wg->sock4);

	/* A missing socket means the device has no live IPv4 transport. */
	if (unlikely(!sock)) {
		ret = -ENONET;
		goto err;
	}

	fl.fl4_sport = inet_sk(sock)->inet_sport;

	if (cache)
		rt = dst_cache_get_ip4(cache, &fl.saddr);

	if (!rt) {
		security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl));
		/* If the sticky source address is no longer a local address
		 * of this host, forget it (and any cached route) so that
		 * routing below chooses a fresh source.
		 */
		if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
						fl.saddr, RT_SCOPE_HOST))) {
			endpoint->src4.s_addr = 0;
			endpoint->src_if4 = 0;
			fl.saddr = 0;
			if (cache)
				dst_cache_reset(cache);
		}
		rt = ip_route_output_flow(sock_net(sock), &fl, sock);
		/* If a source interface was recorded but the lookup either
		 * failed with -EINVAL or resolved to a different interface,
		 * the recorded source binding is stale: clear it and route
		 * a second time without the constraint.
		 */
		if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) &&
			     PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
			     rt->dst.dev->ifindex != endpoint->src_if4)))) {
			endpoint->src4.s_addr = 0;
			endpoint->src_if4 = 0;
			fl.saddr = 0;
			if (cache)
				dst_cache_reset(cache);
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			rt = ip_route_output_flow(sock_net(sock), &fl, sock);
		}
		if (IS_ERR(rt)) {
			ret = PTR_ERR(rt);
			net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
					    wg->dev->name, &endpoint->addr, ret);
			goto err;
		}
		if (cache)
			dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
	}

	skb->ignore_df = 1;
	udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds,
			    ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
			    fl.fl4_dport, false, false);
	goto out;

err:
	kfree_skb(skb);
out:
	rcu_read_unlock_bh();
	return ret;
}
/* IPv6 counterpart of send4(): route and transmit @skb to @endpoint over the
 * device's IPv6 UDP socket, caching the route in @cache when provided.
 * Consumes @skb in all cases.  Returns 0 on success, a negative errno on
 * failure, or -EAFNOSUPPORT when the kernel was built without IPv6.
 */
static int send6(struct wg_device *wg, struct sk_buff *skb,
		 struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct flowi6 fl = {
		.saddr = endpoint->src6,
		.daddr = endpoint->addr6.sin6_addr,
		.fl6_dport = endpoint->addr6.sin6_port,
		.flowi6_mark = wg->fwmark,
		.flowi6_oif = endpoint->addr6.sin6_scope_id,
		.flowi6_proto = IPPROTO_UDP
		/* TODO: addr->sin6_flowinfo */
	};
	struct dst_entry *dst = NULL;
	struct sock *sock;
	int ret = 0;

	skb_mark_not_on_list(skb);
	skb->dev = wg->dev;
	skb->mark = wg->fwmark;

	rcu_read_lock_bh();
	sock = rcu_dereference_bh(wg->sock6);

	/* A missing socket means the device has no live IPv6 transport. */
	if (unlikely(!sock)) {
		ret = -ENONET;
		goto err;
	}

	fl.fl6_sport = inet_sk(sock)->inet_sport;

	if (cache)
		dst = dst_cache_get_ip6(cache, &fl.saddr);

	if (!dst) {
		security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl));
		/* If the sticky source address is set but no longer belongs
		 * to this host, reset it (and any cached route) so routing
		 * picks a fresh source below.
		 */
		if (unlikely(!ipv6_addr_any(&fl.saddr) &&
			     !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
			endpoint->src6 = fl.saddr = in6addr_any;
			if (cache)
				dst_cache_reset(cache);
		}
		dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl,
						      NULL);
		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
					    wg->dev->name, &endpoint->addr, ret);
			goto err;
		}
		if (cache)
			dst_cache_set_ip6(cache, dst, &fl.saddr);
	}

	skb->ignore_df = 1;
	udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
			     ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
			     fl.fl6_dport, false);
	goto out;

err:
	kfree_skb(skb);
out:
	rcu_read_unlock_bh();
	return ret;
#else
	/* No IPv6 support compiled in: still consume the skb. */
	kfree_skb(skb);
	return -EAFNOSUPPORT;
#endif
}
  147. int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds)
  148. {
  149. size_t skb_len = skb->len;
  150. int ret = -EAFNOSUPPORT;
  151. read_lock_bh(&peer->endpoint_lock);
  152. if (peer->endpoint.addr.sa_family == AF_INET)
  153. ret = send4(peer->device, skb, &peer->endpoint, ds,
  154. &peer->endpoint_cache);
  155. else if (peer->endpoint.addr.sa_family == AF_INET6)
  156. ret = send6(peer->device, skb, &peer->endpoint, ds,
  157. &peer->endpoint_cache);
  158. else
  159. dev_kfree_skb(skb);
  160. if (likely(!ret))
  161. peer->tx_bytes += skb_len;
  162. read_unlock_bh(&peer->endpoint_lock);
  163. return ret;
  164. }
  165. int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer,
  166. size_t len, u8 ds)
  167. {
  168. struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
  169. if (unlikely(!skb))
  170. return -ENOMEM;
  171. skb_reserve(skb, SKB_HEADER_LEN);
  172. skb_set_inner_network_header(skb, 0);
  173. skb_put_data(skb, buffer, len);
  174. return wg_socket_send_skb_to_peer(peer, skb, ds);
  175. }
  176. int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
  177. struct sk_buff *in_skb, void *buffer,
  178. size_t len)
  179. {
  180. int ret = 0;
  181. struct sk_buff *skb;
  182. struct endpoint endpoint;
  183. if (unlikely(!in_skb))
  184. return -EINVAL;
  185. ret = wg_socket_endpoint_from_skb(&endpoint, in_skb);
  186. if (unlikely(ret < 0))
  187. return ret;
  188. skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
  189. if (unlikely(!skb))
  190. return -ENOMEM;
  191. skb_reserve(skb, SKB_HEADER_LEN);
  192. skb_set_inner_network_header(skb, 0);
  193. skb_put_data(skb, buffer, len);
  194. if (endpoint.addr.sa_family == AF_INET)
  195. ret = send4(wg, skb, &endpoint, 0, NULL);
  196. else if (endpoint.addr.sa_family == AF_INET6)
  197. ret = send6(wg, skb, &endpoint, 0, NULL);
  198. /* No other possibilities if the endpoint is valid, which it is,
  199. * as we checked above.
  200. */
  201. return ret;
  202. }
  203. int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
  204. const struct sk_buff *skb)
  205. {
  206. memset(endpoint, 0, sizeof(*endpoint));
  207. if (skb->protocol == htons(ETH_P_IP)) {
  208. endpoint->addr4.sin_family = AF_INET;
  209. endpoint->addr4.sin_port = udp_hdr(skb)->source;
  210. endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
  211. endpoint->src4.s_addr = ip_hdr(skb)->daddr;
  212. endpoint->src_if4 = skb->skb_iif;
  213. } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
  214. endpoint->addr6.sin6_family = AF_INET6;
  215. endpoint->addr6.sin6_port = udp_hdr(skb)->source;
  216. endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
  217. endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id(
  218. &ipv6_hdr(skb)->saddr, skb->skb_iif);
  219. endpoint->src6 = ipv6_hdr(skb)->daddr;
  220. } else {
  221. return -EINVAL;
  222. }
  223. return 0;
  224. }
  225. static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
  226. {
  227. return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET &&
  228. a->addr4.sin_port == b->addr4.sin_port &&
  229. a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr &&
  230. a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) ||
  231. (a->addr.sa_family == AF_INET6 &&
  232. b->addr.sa_family == AF_INET6 &&
  233. a->addr6.sin6_port == b->addr6.sin6_port &&
  234. ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) &&
  235. a->addr6.sin6_scope_id == b->addr6.sin6_scope_id &&
  236. ipv6_addr_equal(&a->src6, &b->src6)) ||
  237. unlikely(!a->addr.sa_family && !b->addr.sa_family);
  238. }
/* Copy @endpoint into @peer's endpoint under the write lock, and reset the
 * peer's cached route when something actually changed.  Endpoints with an
 * unsupported address family are ignored.
 */
void wg_socket_set_peer_endpoint(struct wg_peer *peer,
				 const struct endpoint *endpoint)
{
	/* First we check unlocked, in order to optimize, since it's pretty rare
	 * that an endpoint will change. If we happen to be mid-write, and two
	 * CPUs wind up writing the same thing or something slightly different,
	 * it doesn't really matter much either.
	 */
	if (endpoint_eq(endpoint, &peer->endpoint))
		return;
	write_lock_bh(&peer->endpoint_lock);
	if (endpoint->addr.sa_family == AF_INET) {
		peer->endpoint.addr4 = endpoint->addr4;
		peer->endpoint.src4 = endpoint->src4;
		peer->endpoint.src_if4 = endpoint->src_if4;
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   endpoint->addr.sa_family == AF_INET6) {
		peer->endpoint.addr6 = endpoint->addr6;
		peer->endpoint.src6 = endpoint->src6;
	} else {
		/* Unknown family: leave the stored endpoint untouched. */
		goto out;
	}
	dst_cache_reset(&peer->endpoint_cache);
out:
	write_unlock_bh(&peer->endpoint_lock);
}
  264. void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
  265. const struct sk_buff *skb)
  266. {
  267. struct endpoint endpoint;
  268. if (!wg_socket_endpoint_from_skb(&endpoint, skb))
  269. wg_socket_set_peer_endpoint(peer, &endpoint);
  270. }
/* Forget the sticky source address/interface recorded for @peer and flush the
 * cached route immediately, so the next transmission re-selects both.
 */
void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
{
	write_lock_bh(&peer->endpoint_lock);
	/* NOTE(review): zeroing src6 appears intended to clear the IPv4
	 * source fields (src4/src_if4) as well, which presumes they share
	 * storage with src6 in struct endpoint — confirm against the header.
	 */
	memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
	dst_cache_reset_now(&peer->endpoint_cache);
	write_unlock_bh(&peer->endpoint_lock);
}
  278. static int wg_receive(struct sock *sk, struct sk_buff *skb)
  279. {
  280. struct wg_device *wg;
  281. if (unlikely(!sk))
  282. goto err;
  283. wg = sk->sk_user_data;
  284. if (unlikely(!wg))
  285. goto err;
  286. skb_mark_not_on_list(skb);
  287. wg_packet_receive(wg, skb);
  288. return 0;
  289. err:
  290. kfree_skb(skb);
  291. return 0;
  292. }
  293. static void sock_free(struct sock *sock)
  294. {
  295. if (unlikely(!sock))
  296. return;
  297. sk_clear_memalloc(sock);
  298. udp_tunnel_sock_release(sock->sk_socket);
  299. }
  300. static void set_sock_opts(struct socket *sock)
  301. {
  302. sock->sk->sk_allocation = GFP_ATOMIC;
  303. sock->sk->sk_sndbuf = INT_MAX;
  304. sk_set_memalloc(sock->sk);
  305. }
/* Create the device's IPv4 (and, when available, IPv6) UDP tunnel sockets
 * bound to @port in the device's creating namespace, then publish them via
 * wg_socket_reinit().  A @port of 0 lets the kernel pick; the IPv6 socket is
 * then forced onto the same port, retrying with a new ephemeral port when
 * that one is already taken for v6.  Returns 0 or a negative errno.
 */
int wg_socket_init(struct wg_device *wg, u16 port)
{
	struct net *net;
	int ret;
	struct udp_tunnel_sock_cfg cfg = {
		.sk_user_data = wg,
		.encap_type = 1,
		.encap_rcv = wg_receive
	};
	struct socket *new4 = NULL, *new6 = NULL;
	struct udp_port_cfg port4 = {
		.family = AF_INET,
		.local_ip.s_addr = htonl(INADDR_ANY),
		.local_udp_port = htons(port),
		.use_udp_checksums = true
	};
#if IS_ENABLED(CONFIG_IPV6)
	int retries = 0;
	struct udp_port_cfg port6 = {
		.family = AF_INET6,
		.local_ip6 = IN6ADDR_ANY_INIT,
		.use_udp6_tx_checksums = true,
		.use_udp6_rx_checksums = true,
		.ipv6_v6only = true
	};
#endif

	/* Take a reference on the creating netns, which may be mid-teardown. */
	rcu_read_lock();
	net = rcu_dereference(wg->creating_net);
	net = net ? maybe_get_net(net) : NULL;
	rcu_read_unlock();
	if (unlikely(!net))
		return -ENONET;

#if IS_ENABLED(CONFIG_IPV6)
retry:
#endif

	ret = udp_sock_create(net, &port4, &new4);
	if (ret < 0) {
		pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
		goto out;
	}
	set_sock_opts(new4);
	setup_udp_tunnel_sock(net, new4, &cfg);

#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6_mod_enabled()) {
		/* Bind v6 to whatever port v4 actually got. */
		port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
		ret = udp_sock_create(net, &port6, &new6);
		if (ret < 0) {
			udp_tunnel_sock_release(new4);
			/* Only when an ephemeral port was requested: the v4
			 * port may happen to be taken for v6, so try again
			 * from scratch (bounded to 100 attempts).
			 */
			if (ret == -EADDRINUSE && !port && retries++ < 100)
				goto retry;
			pr_err("%s: Could not create IPv6 socket\n",
			       wg->dev->name);
			goto out;
		}
		set_sock_opts(new6);
		setup_udp_tunnel_sock(net, new6, &cfg);
	}
#endif

	wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
	ret = 0;
out:
	put_net(net);
	return ret;
}
/* Atomically swap in @new4/@new6 as the device's transport sockets (either
 * may be NULL), record the new listen port, and free the old sockets only
 * after all concurrent RCU readers (e.g. send4()/send6()) are done with them.
 */
void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
		      struct sock *new6)
{
	struct sock *old4, *old6;

	mutex_lock(&wg->socket_update_lock);
	old4 = rcu_dereference_protected(wg->sock4,
				lockdep_is_held(&wg->socket_update_lock));
	old6 = rcu_dereference_protected(wg->sock6,
				lockdep_is_held(&wg->socket_update_lock));
	rcu_assign_pointer(wg->sock4, new4);
	rcu_assign_pointer(wg->sock6, new6);
	if (new4)
		wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
	mutex_unlock(&wg->socket_update_lock);
	/* Wait for in-flight RCU readers before the old sockets go away. */
	synchronize_net();
	sock_free(old4);
	sock_free(old6);
}