l2tp_ip.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include "l2tp_core.h"

struct l2tp_ip_sock {
        /* inet_sock has to be the first member of l2tp_ip_sock */
        struct inet_sock inet;

        u32 conn_id;
        u32 peer_conn_id;
};

static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;

static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
        return (struct l2tp_ip_sock *)sk;
}
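
/* Find a bound socket matching the given address pair and tunnel connection
 * ID.  A zero local or remote address and a zero interface index on either
 * side act as wildcards.  Caller must hold l2tp_ip_lock.
 */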
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
                                          __be32 raddr, int dif, u32 tunnel_id)
{
        struct sock *sk;

        sk_for_each_bound(sk, &l2tp_ip_bind_table) {
                const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
                const struct inet_sock *inet = inet_sk(sk);
                int bound_dev_if;

                if (!net_eq(sock_net(sk), net))
                        continue;

                bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
                if (bound_dev_if && dif && bound_dev_if != dif)
                        continue;

                if (inet->inet_rcv_saddr && laddr &&
                    inet->inet_rcv_saddr != laddr)
                        continue;

                if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
                        continue;

                if (l2tp->conn_id != tunnel_id)
                        continue;

                goto found;
        }

        sk = NULL;
found:
        return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip_recv(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
        unsigned char *ptr, *optr;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel = NULL;
        struct iphdr *iph;

        if (!pskb_may_pull(skb, 4))
                goto discard;

        /* Point to L2TP header */
        optr = skb->data;
        ptr = skb->data;
        session_id = ntohl(*((__be32 *)ptr));
        ptr += 4;

        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
         * the session_id. If it is 0, the packet is an L2TP control
         * frame and the session_id value can be discarded.
         */
        if (session_id == 0) {
                __skb_pull(skb, 4);
                goto pass_up;
        }

        /* Ok, this is a data packet. Lookup the session. */
        session = l2tp_session_get(net, session_id);
        if (!session)
                goto discard;

        tunnel = session->tunnel;
        if (!tunnel)
                goto discard_sess;

        if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
                goto discard_sess;

        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
        l2tp_session_dec_refcount(session);

        return 0;

pass_up:
        /* Get the tunnel_id from the L2TP header */
        if (!pskb_may_pull(skb, 12))
                goto discard;

        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;

        tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
        iph = (struct iphdr *)skb_network_header(skb);

        read_lock_bh(&l2tp_ip_lock);
        sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
                                   tunnel_id);
        if (!sk) {
                read_unlock_bh(&l2tp_ip_lock);
                goto discard;
        }
        sock_hold(sk);
        read_unlock_bh(&l2tp_ip_lock);

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;

        nf_reset_ct(skb);

        return sk_receive_skb(sk, skb, 1);

discard_sess:
        l2tp_session_dec_refcount(session);
        goto discard;

discard_put:
        sock_put(sk);

discard:
        kfree_skb(skb);
        return 0;
}
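
/* Unbound L2TP/IP sockets live on l2tp_ip_table; l2tp_ip_bind() moves a
 * socket to l2tp_ip_bind_table once it has a local address and connection
 * ID.  Both lists are protected by l2tp_ip_lock.
 */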
static int l2tp_ip_hash(struct sock *sk)
{
        if (sk_unhashed(sk)) {
                write_lock_bh(&l2tp_ip_lock);
                sk_add_node(sk, &l2tp_ip_table);
                write_unlock_bh(&l2tp_ip_lock);
        }
        return 0;
}

static void l2tp_ip_unhash(struct sock *sk)
{
        if (sk_unhashed(sk))
                return;
        write_lock_bh(&l2tp_ip_lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);
}

static int l2tp_ip_open(struct sock *sk)
{
        /* Prevent autobind. We don't have ports. */
        inet_sk(sk)->inet_num = IPPROTO_L2TP;

        l2tp_ip_hash(sk);
        return 0;
}

static void l2tp_ip_close(struct sock *sk, long timeout)
{
        write_lock_bh(&l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);

        sk_common_release(sk);
}

static void l2tp_ip_destroy_sock(struct sock *sk)
{
        struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        if (tunnel)
                l2tp_tunnel_delete(tunnel);
}
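
/* An L2TP/IP tunnel socket is created by userspace as
 * socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP) (see l2tp_ip_protosw below).
 * A minimal, illustrative sketch of binding it to a local address and
 * tunnel connection ID -- struct sockaddr_l2tpip comes from
 * <linux/l2tp.h>; error handling is omitted and the values are examples
 * only:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip sa = {
 *		.l2tp_family  = AF_INET,
 *		.l2tp_addr    = { .s_addr = htonl(INADDR_ANY) },
 *		.l2tp_conn_id = 42,	// local control connection ID
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * bind() must succeed before connect() can be used, since l2tp_ip_bind()
 * is the only place SOCK_ZAPPED is cleared.
 */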
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
        struct net *net = sock_net(sk);
        int ret;
        int chk_addr_ret;

        if (addr_len < sizeof(struct sockaddr_l2tpip))
                return -EINVAL;

        if (addr->l2tp_family != AF_INET)
                return -EINVAL;

        lock_sock(sk);

        ret = -EINVAL;
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out;

        if (sk->sk_state != TCP_CLOSE)
                goto out;

        chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
        ret = -EADDRNOTAVAIL;
        if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
            chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
                goto out;

        if (addr->l2tp_addr.s_addr) {
                inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
                inet->inet_saddr = addr->l2tp_addr.s_addr;
        }
        if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
                inet->inet_saddr = 0;  /* Use device */

        write_lock_bh(&l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
                                  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
                write_unlock_bh(&l2tp_ip_lock);
                ret = -EADDRINUSE;
                goto out;
        }

        sk_dst_reset(sk);
        l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

        sk_add_bind_node(sk, &l2tp_ip_bind_table);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);

        ret = 0;
        sock_reset_flag(sk, SOCK_ZAPPED);

out:
        release_sock(sk);

        return ret;
}
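
/* connect() records the peer address and connection ID.  It requires a
 * prior bind() (SOCK_ZAPPED must already be cleared) and rehashes the
 * socket in the bind table, so __l2tp_ip_bind_lookup() also matches on the
 * remote address from then on.
 */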
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
        int rc;

        if (addr_len < sizeof(*lsa))
                return -EINVAL;

        if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
                return -EINVAL;

        lock_sock(sk);

        /* Must bind first - autobinding does not work */
        if (sock_flag(sk, SOCK_ZAPPED)) {
                rc = -EINVAL;
                goto out_sk;
        }

        rc = __ip4_datagram_connect(sk, uaddr, addr_len);
        if (rc < 0)
                goto out_sk;

        l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

        write_lock_bh(&l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_add_bind_node(sk, &l2tp_ip_bind_table);
        write_unlock_bh(&l2tp_ip_lock);

out_sk:
        release_sock(sk);

        return rc;
}

static int l2tp_ip_disconnect(struct sock *sk, int flags)
{
        if (sock_flag(sk, SOCK_ZAPPED))
                return 0;

        return __udp_disconnect(sk, flags);
}

static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                           int peer)
{
        struct sock *sk = sock->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

        memset(lsa, 0, sizeof(*lsa));
        lsa->l2tp_family = AF_INET;
        if (peer) {
                if (!inet->inet_dport)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
                lsa->l2tp_addr.s_addr = inet->inet_daddr;
        } else {
                __be32 addr = inet->inet_rcv_saddr;

                if (!addr)
                        addr = inet->inet_saddr;
                lsa->l2tp_conn_id = lsk->conn_id;
                lsa->l2tp_addr.s_addr = addr;
        }
        return sizeof(*lsa);
}

static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
        int rc;

        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
                goto drop;

        return 0;

drop:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return 0;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
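
/* The kernel prepends the 4 zero bytes (the zero session ID) below, so the
 * buffer handed in from userspace starts with the L2TP control header
 * itself, including the Control Connection ID field shown in the diagram
 * above.  A rough, untested sketch of sending one control frame by giving
 * the peer address explicitly -- fd, ctrl_buf, ctrl_len and the address are
 * placeholders:
 *
 *	struct sockaddr_l2tpip peer = {
 *		.l2tp_family = AF_INET,
 *		.l2tp_addr   = { .s_addr = inet_addr("192.0.2.1") },
 *	};
 *	sendto(fd, ctrl_buf, ctrl_len, 0,
 *	       (struct sockaddr *)&peer, sizeof(peer));
 */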
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct sk_buff *skb;
        int rc;
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = NULL;
        struct flowi4 *fl4;
        int connected = 0;
        __be32 daddr;

        lock_sock(sk);

        rc = -ENOTCONN;
        if (sock_flag(sk, SOCK_DEAD))
                goto out;

        /* Get and verify the address. */
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);

                rc = -EINVAL;
                if (msg->msg_namelen < sizeof(*lip))
                        goto out;

                if (lip->l2tp_family != AF_INET) {
                        rc = -EAFNOSUPPORT;
                        if (lip->l2tp_family != AF_UNSPEC)
                                goto out;
                }

                daddr = lip->l2tp_addr.s_addr;
        } else {
                rc = -EDESTADDRREQ;
                if (sk->sk_state != TCP_ESTABLISHED)
                        goto out;

                daddr = inet->inet_daddr;
                connected = 1;
        }

        /* Allocate a socket buffer */
        rc = -ENOMEM;
        skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
                           4 + len, 0, GFP_KERNEL);
        if (!skb)
                goto error;

        /* Reserve space for headers, putting IP header on 4-byte boundary. */
        skb_reserve(skb, 2 + NET_SKB_PAD);
        skb_reset_network_header(skb);
        skb_reserve(skb, sizeof(struct iphdr));
        skb_reset_transport_header(skb);

        /* Insert 0 session_id */
        *((__be32 *)skb_put(skb, 4)) = 0;

        /* Copy user data into skb */
        rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc < 0) {
                kfree_skb(skb);
                goto error;
        }

        fl4 = &inet->cork.fl.u.ip4;
        if (connected)
                rt = (struct rtable *)__sk_dst_check(sk, 0);

        rcu_read_lock();
        if (!rt) {
                const struct ip_options_rcu *inet_opt;

                inet_opt = rcu_dereference(inet->inet_opt);

                /* Use correct destination address if we have options. */
                if (inet_opt && inet_opt->opt.srr)
                        daddr = inet_opt->opt.faddr;

                /* If this fails, retransmit mechanism of transport layer will
                 * keep trying until route appears or the connection times
                 * itself out.
                 */
                rt = ip_route_output_ports(sock_net(sk), fl4, sk,
                                           daddr, inet->inet_saddr,
                                           inet->inet_dport, inet->inet_sport,
                                           sk->sk_protocol, RT_CONN_FLAGS(sk),
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
                if (connected) {
                        sk_setup_caps(sk, &rt->dst);
                } else {
                        skb_dst_set(skb, &rt->dst);
                        goto xmit;
                }
        }

        /* We don't need to clone dst here, it is guaranteed to not disappear.
         * __dev_xmit_skb() might force a refcount if needed.
         */
        skb_dst_set_noref(skb, &rt->dst);

xmit:
        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
        rcu_read_unlock();

error:
        if (rc >= 0)
                rc = len;

out:
        release_sock(sk);
        return rc;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        rc = -EHOSTUNREACH;
        goto out;
}
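
/* Control frames queued by l2tp_ip_recv()/l2tp_ip_backlog_recv() are read
 * here.  The source is reported as a plain sockaddr_in with port 0; the
 * peer's connection ID is only available inside the L2TP header of the
 * frame itself.
 */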
static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
                           size_t len, int flags, int *addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        size_t copied = 0;
        int err = -EOPNOTSUPP;
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct sk_buff *skb;

        if (flags & MSG_OOB)
                goto out;

        skb = skb_recv_datagram(sk, flags, &err);
        if (!skb)
                goto out;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto done;

        sock_recv_timestamp(msg, sk, skb);

        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                sin->sin_port = 0;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
                *addr_len = sizeof(*sin);
        }
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);
        if (flags & MSG_TRUNC)
                copied = skb->len;
done:
        skb_free_datagram(sk, skb);
out:
        return err ? err : copied;
}
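
/* SIOCOUTQ reports the bytes queued for transmit, SIOCINQ the length of the
 * next queued receive frame.  Exported so that other L2TP socket flavours
 * can share the same ioctl handler.
 */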
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct sk_buff *skb;
        int amount;

        switch (cmd) {
        case SIOCOUTQ:
                amount = sk_wmem_alloc_get(sk);
                break;
        case SIOCINQ:
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                amount = skb ? skb->len : 0;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                break;

        default:
                return -ENOIOCTLCMD;
        }

        return put_user(amount, (int __user *)arg);
}
EXPORT_SYMBOL_GPL(l2tp_ioctl);

static struct proto l2tp_ip_prot = {
        .name           = "L2TP/IP",
        .owner          = THIS_MODULE,
        .init           = l2tp_ip_open,
        .close          = l2tp_ip_close,
        .bind           = l2tp_ip_bind,
        .connect        = l2tp_ip_connect,
        .disconnect     = l2tp_ip_disconnect,
        .ioctl          = l2tp_ioctl,
        .destroy        = l2tp_ip_destroy_sock,
        .setsockopt     = ip_setsockopt,
        .getsockopt     = ip_getsockopt,
        .sendmsg        = l2tp_ip_sendmsg,
        .recvmsg        = l2tp_ip_recvmsg,
        .backlog_rcv    = l2tp_ip_backlog_recv,
        .hash           = l2tp_ip_hash,
        .unhash         = l2tp_ip_unhash,
        .obj_size       = sizeof(struct l2tp_ip_sock),
};

static const struct proto_ops l2tp_ip_ops = {
        .family         = PF_INET,
        .owner          = THIS_MODULE,
        .release        = inet_release,
        .bind           = inet_bind,
        .connect        = inet_dgram_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = l2tp_ip_getname,
        .poll           = datagram_poll,
        .ioctl          = inet_ioctl,
        .gettstamp      = sock_gettstamp,
        .listen         = sock_no_listen,
        .shutdown       = inet_shutdown,
        .setsockopt     = sock_common_setsockopt,
        .getsockopt     = sock_common_getsockopt,
        .sendmsg        = inet_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};

static struct inet_protosw l2tp_ip_protosw = {
        .type           = SOCK_DGRAM,
        .protocol       = IPPROTO_L2TP,
        .prot           = &l2tp_ip_prot,
        .ops            = &l2tp_ip_ops,
};

static struct net_protocol l2tp_ip_protocol __read_mostly = {
        .handler        = l2tp_ip_recv,
};
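
/* Registration order matters: register the proto first, then hook
 * IPPROTO_L2TP into the IPv4 protocol table so l2tp_ip_recv() starts seeing
 * packets, and finally publish the SOCK_DGRAM/IPPROTO_L2TP socket type via
 * inet_register_protosw().  l2tp_ip_exit() unwinds in the reverse order.
 */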
static int __init l2tp_ip_init(void)
{
        int err;

        pr_info("L2TP IP encapsulation support (L2TPv3)\n");

        err = proto_register(&l2tp_ip_prot, 1);
        if (err != 0)
                goto out;

        err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        if (err)
                goto out1;

        inet_register_protosw(&l2tp_ip_protosw);
        return 0;

out1:
        proto_unregister(&l2tp_ip_prot);
out:
        return err;
}

static void __exit l2tp_ip_exit(void)
{
        inet_unregister_protosw(&l2tp_ip_protosw);
        inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        proto_unregister(&l2tp_ip_prot);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <[email protected]>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
 * because __stringify doesn't like enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);