// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<[email protected]>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

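/* Compute the 4-tuple hash used for connected-socket lookup and for
 * SO_REUSEPORT selection. Only the last 32 bits of the local address feed
 * the hash directly; the foreign address is mixed in via a jhash keyed
 * with a lazily initialized random secret (net_get_random_once), so hash
 * values are not predictable across hosts.
 */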
static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

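/* Bind helper: precompute the address part of the secondary (address, port)
 * hash, with the port still zero, so rehashing only has to fold in the port
 * once it is known; the actual port allocation is deferred to the shared
 * udp_lib_get_port().
 */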
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

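/* Score a candidate socket against an incoming packet. -1 means "cannot
 * match at all"; otherwise one point is added for each specific attribute
 * that matches (connected destination port, connected peer address, bound
 * device, receiving CPU), so a fully connected socket beats a wildcard one.
 */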
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

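/* If the best-scoring socket belongs to a SO_REUSEPORT group and is not
 * already connected, let the group pick the final target based on the
 * 4-tuple hash computed by udp6_ehashfn().
 */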
static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     const struct in6_addr *saddr,
				     __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;
			result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			badness = compute_score(sk, net, saddr, sport,
						daddr, hnum, dif, sdif);
		}
	}
	return result;
}

static inline struct sock *udp6_lookup_run_bpf(struct net *net,
					       struct udp_table *udptable,
					       struct sk_buff *skb,
					       const struct in6_addr *saddr,
					       __be16 sport,
					       const struct in6_addr *daddr,
					       u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

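/* Three-step receive lookup: the exact (address, port) slot first, then a
 * BPF sk_lookup redirect if one is attached, then the wildcard (any, port)
 * slot. An established match short-circuits before the BPF hook runs.
 */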
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp6_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum, dif);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);

done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);

		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

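/* ICMPv6 error handler shared by UDP and UDP-Lite. Resolves the erroring
 * packet back to a local socket (or a tunnel, via the encap path above),
 * handles PMTU and redirect messages, and queues the error for userspace
 * only if IPV6_RECVERR is set; otherwise only hard errors on connected
 * sockets are reported.
 */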
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, offset);
		goto out;
	}

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

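/* Charge the skb to the socket receive queue. For connected sockets the
 * rxhash, NAPI id and incoming CPU are recorded to help flow steering;
 * queueing failures are accounted as RCVBUFERRORS or MEMERRORS before the
 * drop.
 */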
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb_reason(skb, drop_reason);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

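/* Deliver one skb to a socket: run XFRM policy and socket filter checks,
 * divert to the encap_rcv() hook for encapsulation sockets (e.g. tunnels),
 * enforce UDP-Lite coverage rules, then enqueue via __udpv6_queue_rcv_skb().
 */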
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb_reason(skb, drop_reason);
	return -1;
}

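/* Entry point from the protocol handlers: packets carrying unexpected GSO
 * state (e.g. a GRO-aggregated UDP train hitting a socket that did not
 * enable UDP_GRO) are segmented back into individual datagrams before
 * per-skb delivery.
 */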
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

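/* Multicast membership filter: check every demux attribute (port, family,
 * connected peer, bound device, local address) and finally the socket's
 * IPv6 multicast group list via inet6_mc_check().
 */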
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

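/* Main receive routine for both UDP and UDP-Lite. Validates the header,
 * length and checksum, then dispatches in order: an early-demuxed socket
 * stolen from the skb, multicast delivery, or a unicast lookup; with no
 * matching socket the packet is answered with ICMPv6 port unreachable.
 */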
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb, &refcounted);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb_reason(skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb_reason(skb, reason);
	return 0;
}

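/* Early-demux helpers: before routing, try to match the packet to an
 * established (connected) UDP socket and attach its cached rx dst. Only
 * the first socket in the hash chain is examined, trading completeness
 * for a cheap fast path.
 */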
static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	slot2 = hash2 & udp_table.mask;
	hslot2 = &udp_table.hash2[slot2];
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

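/* Finish a corked send: turn the socket write queue into one skb and hand
 * it to udp_v6_send_skb() with the flow stored in the cork. Reached when
 * the last uncorked segment is queued, and also as the push callback passed
 * to udp_lib_setsockopt() when UDP_CORK is cleared.
 */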
static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

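/* Top-level sendmsg(). v4-mapped destinations are bounced to udp_sendmsg();
 * otherwise the flow (addresses, ports, flow label, oif, mark) is assembled
 * from the socket state, the destination address and any cmsgs, a route is
 * looked up, and the datagram is either built locklessly (the common case)
 * or appended to the corked queue. ipc6.gso_size, seeded from the
 * UDP_SEGMENT socket option and overridable per call via a cmsg, makes
 * udp_v6_send_skb() emit one SKB_GSO_UDP_L4 skb that is later split into
 * gso_size-sized datagrams.
 */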
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		if (up->pending == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

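/* Socket teardown: flush any corked data under the socket lock, then undo
 * encapsulation state (the encap_destroy() hook and the static key) if it
 * was used.
 */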
void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

static const struct inet6_protocol udpv6_protocol = {
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);

		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif
	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}