/* net/ipv6/ip6_input.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * IPv6 input
  4. * Linux INET6 implementation
  5. *
  6. * Authors:
  7. * Pedro Roque <[email protected]>
  8. * Ian P. Morris <[email protected]>
  9. *
  10. * Based in linux/net/ipv4/ip_input.c
  11. */
  12. /* Changes
  13. *
  14. * Mitsuru KANDA @USAGI and
  15. * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
  16. */
  17. #include <linux/errno.h>
  18. #include <linux/types.h>
  19. #include <linux/socket.h>
  20. #include <linux/sockios.h>
  21. #include <linux/net.h>
  22. #include <linux/netdevice.h>
  23. #include <linux/in6.h>
  24. #include <linux/icmpv6.h>
  25. #include <linux/mroute6.h>
  26. #include <linux/slab.h>
  27. #include <linux/indirect_call_wrapper.h>
  28. #include <linux/netfilter.h>
  29. #include <linux/netfilter_ipv6.h>
  30. #include <net/sock.h>
  31. #include <net/snmp.h>
  32. #include <net/udp.h>
  33. #include <net/ipv6.h>
  34. #include <net/protocol.h>
  35. #include <net/transp_v6.h>
  36. #include <net/rawv6.h>
  37. #include <net/ndisc.h>
  38. #include <net/ip6_route.h>
  39. #include <net/addrconf.h>
  40. #include <net/xfrm.h>
  41. #include <net/inet_ecn.h>
  42. #include <net/dst_metadata.h>
  43. static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
  44. struct sk_buff *skb)
  45. {
  46. if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
  47. !skb_dst(skb) && !skb->sk) {
  48. switch (ipv6_hdr(skb)->nexthdr) {
  49. case IPPROTO_TCP:
  50. if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
  51. tcp_v6_early_demux(skb);
  52. break;
  53. case IPPROTO_UDP:
  54. if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
  55. udp_v6_early_demux(skb);
  56. break;
  57. }
  58. }
  59. if (!skb_valid_dst(skb))
  60. ip6_route_input(skb);
  61. }
  62. int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
  63. {
  64. /* if ingress device is enslaved to an L3 master device pass the
  65. * skb to its handler for processing
  66. */
  67. skb = l3mdev_ip6_rcv(skb);
  68. if (!skb)
  69. return NET_RX_SUCCESS;
  70. ip6_rcv_finish_core(net, sk, skb);
  71. return dst_input(skb);
  72. }
  73. static void ip6_sublist_rcv_finish(struct list_head *head)
  74. {
  75. struct sk_buff *skb, *next;
  76. list_for_each_entry_safe(skb, next, head, list) {
  77. skb_list_del_init(skb);
  78. dst_input(skb);
  79. }
  80. }
  81. static bool ip6_can_use_hint(const struct sk_buff *skb,
  82. const struct sk_buff *hint)
  83. {
  84. return hint && !skb_dst(skb) &&
  85. ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr);
  86. }
  87. static struct sk_buff *ip6_extract_route_hint(const struct net *net,
  88. struct sk_buff *skb)
  89. {
  90. if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
  91. IP6CB(skb)->flags & IP6SKB_MULTIPATH)
  92. return NULL;
  93. return skb;
  94. }
/* Route and deliver a list of IPv6 packets.
 *
 * Packets are regrouped into sublists sharing the same dst entry so each
 * sublist can be dispatched together via ip6_sublist_rcv_finish().  A
 * "hint" skb lets later packets with the same destination address copy
 * its dst instead of performing a full route lookup.
 */
static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
				struct list_head *head)
{
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_rcv(skb);
		if (!skb)
			continue;

		/* Reuse the previous packet's route when the daddr matches;
		 * otherwise do the normal early-demux + route lookup.
		 */
		if (ip6_can_use_hint(skb, hint))
			skb_dst_copy(skb, hint);
		else
			ip6_rcv_finish_core(net, sk, skb);
		dst = skb_dst(skb);
		if (curr_dst != dst) {
			/* dst changed: this skb may become the hint for the
			 * packets that follow (if hinting is safe here).
			 */
			hint = ip6_extract_route_hint(net, skb);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip6_sublist_rcv_finish(&sublist);
}
/* Core validation of a single received IPv6 packet.
 *
 * Checks the header against the RFC 4291 address/scope rules, trims
 * link-layer padding beyond the payload length and parses hop-by-hop
 * options when present.  Runs inside its own RCU read-side section.
 *
 * Returns the (possibly reallocated by skb_share_check()) skb on success,
 * or NULL when the packet was dropped/consumed; SNMP counters and an skb
 * drop reason are recorded on the failure paths.
 */
static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
				    struct net *net)
{
	enum skb_drop_reason reason;
	const struct ipv6hdr *hdr;
	u32 pkt_len;
	struct inet6_dev *idev;

	/* Addressed to some other host at the link layer: not ours. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
		kfree_skb_reason(skb, SKB_DROP_REASON_OTHERHOST);
		return NULL;
	}

	rcu_read_lock();

	idev = __in6_dev_get(skb->dev);

	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);

	SKB_DR_SET(reason, NOT_SPECIFIED);
	/* Drop if the skb cannot be unshared, the device has no IPv6
	 * state, or IPv6 is administratively disabled on it.
	 */
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
	    !idev || unlikely(idev->cnf.disable_ipv6)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		if (idev && unlikely(idev->cnf.disable_ipv6))
			SKB_DR_SET(reason, IPV6DISABLED);
		goto drop;
	}

	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

	/*
	 * Store incoming device index. When the packet will
	 * be queued, we cannot refer to skb->dev anymore.
	 *
	 * BTW, when we send a packet for our own local address on a
	 * non-loopback interface (e.g. ethX), it is being delivered
	 * via the loopback interface (lo) here; skb->dev = loopback_dev.
	 * It, however, should be considered as if it is being
	 * arrived via the sending interface (ethX), because of the
	 * nature of scoping architecture. --yoshfuji
	 */
	IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;

	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
		goto err;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6) {
		SKB_DR_SET(reason, UNHANDLED_PROTO);
		goto err;
	}

	/* Account the packet per ECN codepoint, once per GSO segment. */
	__IP6_ADD_STATS(net, idev,
			IPSTATS_MIB_NOECTPKTS +
				(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
	/*
	 * RFC4291 2.5.3
	 * The loopback address must not be used as the source address in IPv6
	 * packets that are sent outside of a single node. [..]
	 * A packet received on an interface with a destination address
	 * of loopback must be dropped.
	 */
	if ((ipv6_addr_loopback(&hdr->saddr) ||
	     ipv6_addr_loopback(&hdr->daddr)) &&
	    !(dev->flags & IFF_LOOPBACK) &&
	    !netif_is_l3_master(dev))
		goto err;

	/* RFC4291 Errata ID: 3480
	 * Interface-Local scope spans only a single interface on a
	 * node and is useful only for loopback transmission of
	 * multicast. Packets with interface-local scope received
	 * from another node must be discarded.
	 */
	if (!(skb->pkt_type == PACKET_LOOPBACK ||
	      dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
		goto err;

	/* If enabled, drop unicast packets that were encapsulated in link-layer
	 * multicast or broadcast to protected against the so-called "hole-196"
	 * attack in 802.11 wireless.
	 */
	if (!ipv6_addr_is_multicast(&hdr->daddr) &&
	    (skb->pkt_type == PACKET_BROADCAST ||
	     skb->pkt_type == PACKET_MULTICAST) &&
	    idev->cnf.drop_unicast_in_l2_multicast) {
		SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
		goto err;
	}

	/* RFC4291 2.7
	 * Nodes must not originate a packet to a multicast address whose scope
	 * field contains the reserved value 0; if such a packet is received, it
	 * must be silently dropped.
	 */
	if (ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
		goto err;

	/*
	 * RFC4291 2.7
	 * Multicast addresses must not be used as source addresses in IPv6
	 * packets or appear in any Routing header.
	 */
	if (ipv6_addr_is_multicast(&hdr->saddr))
		goto err;

	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	pkt_len = ntohs(hdr->payload_len);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
			__IP6_INC_STATS(net,
					idev, IPSTATS_MIB_INTRUNCATEDPKTS);
			SKB_DR_SET(reason, PKT_TOO_SMALL);
			goto drop;
		}
		/* Trim link-layer padding beyond the IPv6 payload. */
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			goto err;
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		/* On error ipv6_parse_hopopts() has already freed the skb,
		 * so only the stats update and unlock remain here.
		 */
		if (ipv6_parse_hopopts(skb) < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			rcu_read_unlock();
			return NULL;
		}
	}

	rcu_read_unlock();

	/* Must drop socket now because of tproxy. */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;
err:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	SKB_DR_OR(reason, IP_INHDR);
drop:
	rcu_read_unlock();
	kfree_skb_reason(skb, reason);
	return NULL;
}
  261. int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
  262. {
  263. struct net *net = dev_net(skb->dev);
  264. skb = ip6_rcv_core(skb, dev, net);
  265. if (skb == NULL)
  266. return NET_RX_DROP;
  267. return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
  268. net, NULL, skb, dev, NULL,
  269. ip6_rcv_finish);
  270. }
/* Run a sublist of packets (all sharing the same device and netns)
 * through the PRE_ROUTING netfilter hook in one batch, then route and
 * deliver the survivors.
 */
static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
			    struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip6_rcv_finish);
	ip6_list_rcv_finish(net, NULL, head);
}
/* Receive a list of IPv6 packets.
 *
 * Each packet is validated via ip6_rcv_core(); survivors are batched
 * into sublists keyed on (device, netns) so each sublist needs only a
 * single netfilter hook invocation in ip6_sublist_rcv().
 */
void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
		   struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		/* ip6_rcv_core() returns NULL when it dropped the skb. */
		skb = ip6_rcv_core(skb, dev, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	if (!list_empty(&sublist))
		ip6_sublist_rcv(&sublist, curr_dev, curr_net);
}
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *));

/*
 *	Deliver the packet to the host
 */
/* Walks the extension-header chain starting at @nexthdr, offering the
 * packet to raw sockets and to the registered inet6 protocol handler at
 * each step.  Extension-header handlers return the next protocol number
 * (> 0) and the walk resubmits; a handler flagged INET6_PROTO_FINAL
 * terminates the chain.  Caller must hold rcu_read_lock() — inet6_protos
 * is read with rcu_dereference() below.
 */
void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
			      bool have_final)
{
	const struct inet6_protocol *ipprot;
	struct inet6_dev *idev;
	unsigned int nhoff;
	SKB_DR(reason);
	bool raw;

	/*
	 *	Parse extension headers
	 */

resubmit:
	idev = ip6_dst_idev(skb_dst(skb));
	nhoff = IP6CB(skb)->nhoff;
	if (!have_final) {
		if (!pskb_pull(skb, skb_transport_offset(skb)))
			goto discard;
		/* Pick up the next-header value the previous parser stored
		 * at offset nhoff in the network header.
		 */
		nexthdr = skb_network_header(skb)[nhoff];
	}

resubmit_final:
	/* Offer a clone to any matching raw sockets first. */
	raw = raw6_local_deliver(skb, nexthdr);
	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot) {
		int ret;

		if (have_final) {
			if (!(ipprot->flags & INET6_PROTO_FINAL)) {
				/* Once we've seen a final protocol don't
				 * allow encapsulation on any non-final
				 * ones. This allows foo in UDP encapsulation
				 * to work.
				 */
				goto discard;
			}
		} else if (ipprot->flags & INET6_PROTO_FINAL) {
			const struct ipv6hdr *hdr;
			int sdif = inet6_sdif(skb);
			struct net_device *dev;

			/* Only do this once for first final protocol */
			have_final = true;

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);

			/* skb->dev passed may be master dev for vrfs. */
			if (sdif) {
				dev = dev_get_by_index_rcu(net, sdif);
				if (!dev)
					goto discard;
			} else {
				dev = skb->dev;
			}

			/* Multicast must be addressed to a group we joined
			 * on this device, unless it is an MLD message.
			 */
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(dev, &hdr->daddr,
						 &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) {
				SKB_DR_SET(reason, IP_INADDRERRORS);
				goto discard;
			}
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) {
			if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				SKB_DR_SET(reason, XFRM_POLICY);
				goto discard;
			}
			nf_reset_ct(skb);
		}

		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
				      skb);
		if (ret > 0) {
			if (ipprot->flags & INET6_PROTO_FINAL) {
				/* Not an extension header, most likely UDP
				 * encapsulation. Use return value as nexthdr
				 * protocol not nhoff (which presumably is
				 * not set by handler).
				 */
				nexthdr = ret;
				goto resubmit_final;
			} else {
				goto resubmit;
			}
		} else if (ret == 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
		}
	} else {
		if (!raw) {
			/* No handler and no raw socket: unknown protocol.
			 * Send a Parameter Problem ICMP if policy allows.
			 */
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP6_INC_STATS(net, idev,
						IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff);
				SKB_DR_SET(reason, IP_NOPROTO);
			} else {
				SKB_DR_SET(reason, XFRM_POLICY);
			}
			kfree_skb_reason(skb, reason);
		} else {
			/* A raw socket took the packet: count it delivered. */
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
	return;

discard:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
	kfree_skb_reason(skb, reason);
}
/* LOCAL_IN hook continuation: hand the packet to the transport-layer
 * protocol handlers under rcu_read_lock(), as ip6_protocol_deliver_rcu()
 * requires.
 */
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_clear_delivery_time(skb);
	rcu_read_lock();
	ip6_protocol_deliver_rcu(net, skb, 0, false);
	rcu_read_unlock();

	return 0;
}
/* Input a packet destined for this host: run the LOCAL_IN netfilter hook
 * and then deliver it to the protocol handlers via ip6_input_finish().
 */
int ip6_input(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
		       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		       ip6_input_finish);
}
EXPORT_SYMBOL_GPL(ip6_input);
/* Input an IPv6 multicast packet: account it, optionally hand it to the
 * multicast routing code for forwarding (CONFIG_IPV6_MROUTE), and
 * deliver it locally when this host is a member of the destination
 * group or the packet is a valid MLD message.
 */
int ip6_mc_input(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct ipv6hdr *hdr;
	struct net_device *dev;
	bool deliver;

	__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
			   __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
			   skb->len);

	/* skb->dev passed may be master dev for vrfs. */
	if (sdif) {
		rcu_read_lock();
		dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif);
		if (!dev) {
			rcu_read_unlock();
			kfree_skb(skb);
			return -ENODEV;
		}
	} else {
		dev = skb->dev;
	}

	hdr = ipv6_hdr(skb);
	/* Deliver locally only if we joined the destination group on
	 * this device (MLD handling below may override this).
	 */
	deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL);
	if (sdif)
		rcu_read_unlock();

#ifdef CONFIG_IPV6_MROUTE
	/*
	 *      IPv6 multicast router mode is now supported ;)
	 */
	if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
	    !(ipv6_addr_type(&hdr->daddr) &
	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
		/*
		 * Okay, we try to forward - split and duplicate
		 * packets.
		 */
		struct sk_buff *skb2;
		struct inet6_skb_parm *opt = IP6CB(skb);

		/* Check for MLD */
		if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
			/* Check if this is a mld message */
			u8 nexthdr = hdr->nexthdr;
			__be16 frag_off;
			int offset;

			/* Check if the value of Router Alert
			 * is for MLD (0x0000).
			 */
			if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
				deliver = false;

				if (!ipv6_ext_hdr(nexthdr)) {
					/* BUG */
					goto out;
				}
				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
							  &nexthdr, &frag_off);
				if (offset < 0)
					goto out;

				/* Only genuine MLD messages are delivered
				 * locally when router alert is set.
				 */
				if (ipv6_is_mld(skb, nexthdr, offset))
					deliver = true;

				goto out;
			}
			/* unknown RA - process it normally */
		}

		/* Forward a clone when we also deliver locally; otherwise
		 * hand the original skb to the multicast router and keep
		 * nothing (skb = NULL makes the kfree_skb below a no-op).
		 */
		if (deliver)
			skb2 = skb_clone(skb, GFP_ATOMIC);
		else {
			skb2 = skb;
			skb = NULL;
		}

		if (skb2) {
			ip6_mr_input(skb2);
		}
	}
out:
#endif
	if (likely(deliver))
		ip6_input(skb);
	else {
		/* discard */
		kfree_skb(skb);
	}

	return 0;
}